1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 #include "drmP.h" 24 #include "amdgpu.h" 25 #include "amdgpu_pm.h" 26 #include "amdgpu_i2c.h" 27 #include "vid.h" 28 #include "atom.h" 29 #include "amdgpu_atombios.h" 30 #include "atombios_crtc.h" 31 #include "atombios_encoders.h" 32 #include "amdgpu_pll.h" 33 #include "amdgpu_connectors.h" 34 35 #include "dce/dce_11_0_d.h" 36 #include "dce/dce_11_0_sh_mask.h" 37 #include "dce/dce_11_0_enum.h" 38 #include "oss/oss_3_0_d.h" 39 #include "oss/oss_3_0_sh_mask.h" 40 #include "gmc/gmc_8_1_d.h" 41 #include "gmc/gmc_8_1_sh_mask.h" 42 43 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); 44 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); 45 46 static const u32 crtc_offsets[] = 47 { 48 CRTC0_REGISTER_OFFSET, 49 CRTC1_REGISTER_OFFSET, 50 CRTC2_REGISTER_OFFSET, 51 CRTC3_REGISTER_OFFSET, 52 CRTC4_REGISTER_OFFSET, 53 CRTC5_REGISTER_OFFSET, 54 CRTC6_REGISTER_OFFSET 55 }; 56 57 static const u32 hpd_offsets[] = 58 { 59 HPD0_REGISTER_OFFSET, 60 HPD1_REGISTER_OFFSET, 61 HPD2_REGISTER_OFFSET, 62 HPD3_REGISTER_OFFSET, 63 HPD4_REGISTER_OFFSET, 64 HPD5_REGISTER_OFFSET 65 }; 66 67 static const uint32_t dig_offsets[] = { 68 DIG0_REGISTER_OFFSET, 69 DIG1_REGISTER_OFFSET, 70 DIG2_REGISTER_OFFSET, 71 DIG3_REGISTER_OFFSET, 72 DIG4_REGISTER_OFFSET, 73 DIG5_REGISTER_OFFSET, 74 DIG6_REGISTER_OFFSET, 75 DIG7_REGISTER_OFFSET, 76 DIG8_REGISTER_OFFSET 77 }; 78 79 static const struct { 80 uint32_t reg; 81 uint32_t vblank; 82 uint32_t vline; 83 uint32_t hpd; 84 85 } interrupt_status_offsets[] = { { 86 .reg = mmDISP_INTERRUPT_STATUS, 87 .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, 88 .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, 89 .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 90 }, { 91 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, 92 .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, 93 .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, 94 .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 95 }, { 96 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, 97 .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, 98 .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, 99 .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 100 }, { 101 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, 102 .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, 103 .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, 104 .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 105 }, { 106 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, 107 .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, 108 .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, 109 .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 110 }, { 111 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, 112 .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, 113 .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, 114 .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 115 } }; 116 117 static const u32 cz_golden_settings_a11[] = 118 { 119 mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, 120 mmFBC_MISC, 0x1f311fff, 0x14300000, 121 }; 122 123 static const u32 cz_mgcg_cgcg_init[] = 124 { 125 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, 126 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, 127 }; 128 129 static const u32 stoney_golden_settings_a11[] = 130 { 131 mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, 132 mmFBC_MISC, 0x1f311fff, 0x14302000, 133 }; 134 135 136 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) 137 { 138 switch (adev->asic_type) { 139 case CHIP_CARRIZO: 140 amdgpu_program_register_sequence(adev, 141 cz_mgcg_cgcg_init, 142 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); 143 amdgpu_program_register_sequence(adev, 144 cz_golden_settings_a11, 145 (const u32)ARRAY_SIZE(cz_golden_settings_a11)); 146 break; 147 case CHIP_STONEY: 148 amdgpu_program_register_sequence(adev, 149 stoney_golden_settings_a11, 150 (const u32)ARRAY_SIZE(stoney_golden_settings_a11)); 151 break; 152 default: 153 break; 154 } 155 } 156 157 static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev, 158 u32 block_offset, u32 reg) 159 { 160 unsigned long flags; 161 u32 r; 162 163 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); 164 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 165 r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); 166 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 167 168 return r; 169 } 170 171 static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev, 172 u32 block_offset, u32 reg, u32 v) 173 { 174 unsigned long flags; 175 176 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); 177 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 178 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); 179 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 180 } 181 182 static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc) 183 { 184 if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & 185 CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK) 186 return true; 187 else 188 return false; 189 } 190 191 static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc) 192 { 193 u32 pos1, pos2; 194 195 pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 196 pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 197 198 if (pos1 != pos2) 199 return true; 200 else 201 return false; 202 } 203 204 /** 205 * dce_v11_0_vblank_wait - vblank wait asic callback. 206 * 207 * @adev: amdgpu_device pointer 208 * @crtc: crtc to wait for vblank on 209 * 210 * Wait for vblank on the requested crtc (evergreen+). 211 */ 212 static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) 213 { 214 unsigned i = 100; 215 216 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) 217 return; 218 219 if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) 220 return; 221 222 /* depending on when we hit vblank, we may be close to active; if so, 223 * wait for another frame. 224 */ 225 while (dce_v11_0_is_in_vblank(adev, crtc)) { 226 if (i++ == 100) { 227 i = 0; 228 if (!dce_v11_0_is_counter_moving(adev, crtc)) 229 break; 230 } 231 } 232 233 while (!dce_v11_0_is_in_vblank(adev, crtc)) { 234 if (i++ == 100) { 235 i = 0; 236 if (!dce_v11_0_is_counter_moving(adev, crtc)) 237 break; 238 } 239 } 240 } 241 242 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 243 { 244 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) 245 return 0; 246 else 247 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 248 } 249 250 static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) 251 { 252 unsigned i; 253 254 /* Enable pflip interrupts */ 255 for (i = 0; i < adev->mode_info.num_crtc; i++) 256 amdgpu_irq_get(adev, &adev->pageflip_irq, i); 257 } 258 259 static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) 260 { 261 unsigned i; 262 263 /* Disable pflip interrupts */ 264 for (i = 0; i < adev->mode_info.num_crtc; i++) 265 amdgpu_irq_put(adev, &adev->pageflip_irq, i); 266 } 267 268 /** 269 * dce_v11_0_page_flip - pageflip callback. 270 * 271 * @adev: amdgpu_device pointer 272 * @crtc_id: crtc to cleanup pageflip on 273 * @crtc_base: new address of the crtc (GPU MC address) 274 * 275 * Triggers the actual pageflip by updating the primary 276 * surface base address. 277 */ 278 static void dce_v11_0_page_flip(struct amdgpu_device *adev, 279 int crtc_id, u64 crtc_base) 280 { 281 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 282 283 /* update the scanout addresses */ 284 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 285 upper_32_bits(crtc_base)); 286 /* writing to the low address triggers the update */ 287 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 288 lower_32_bits(crtc_base)); 289 /* post the write */ 290 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); 291 } 292 293 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 294 u32 *vbl, u32 *position) 295 { 296 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 297 return -EINVAL; 298 299 *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); 300 *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 301 302 return 0; 303 } 304 305 /** 306 * dce_v11_0_hpd_sense - hpd sense callback. 307 * 308 * @adev: amdgpu_device pointer 309 * @hpd: hpd (hotplug detect) pin 310 * 311 * Checks if a digital monitor is connected (evergreen+). 312 * Returns true if connected, false if not connected. 313 */ 314 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, 315 enum amdgpu_hpd_id hpd) 316 { 317 int idx; 318 bool connected = false; 319 320 switch (hpd) { 321 case AMDGPU_HPD_1: 322 idx = 0; 323 break; 324 case AMDGPU_HPD_2: 325 idx = 1; 326 break; 327 case AMDGPU_HPD_3: 328 idx = 2; 329 break; 330 case AMDGPU_HPD_4: 331 idx = 3; 332 break; 333 case AMDGPU_HPD_5: 334 idx = 4; 335 break; 336 case AMDGPU_HPD_6: 337 idx = 5; 338 break; 339 default: 340 return connected; 341 } 342 343 if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & 344 DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) 345 connected = true; 346 347 return connected; 348 } 349 350 /** 351 * dce_v11_0_hpd_set_polarity - hpd set polarity callback. 352 * 353 * @adev: amdgpu_device pointer 354 * @hpd: hpd (hotplug detect) pin 355 * 356 * Set the polarity of the hpd pin (evergreen+). 357 */ 358 static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, 359 enum amdgpu_hpd_id hpd) 360 { 361 u32 tmp; 362 bool connected = dce_v11_0_hpd_sense(adev, hpd); 363 int idx; 364 365 switch (hpd) { 366 case AMDGPU_HPD_1: 367 idx = 0; 368 break; 369 case AMDGPU_HPD_2: 370 idx = 1; 371 break; 372 case AMDGPU_HPD_3: 373 idx = 2; 374 break; 375 case AMDGPU_HPD_4: 376 idx = 3; 377 break; 378 case AMDGPU_HPD_5: 379 idx = 4; 380 break; 381 case AMDGPU_HPD_6: 382 idx = 5; 383 break; 384 default: 385 return; 386 } 387 388 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); 389 if (connected) 390 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); 391 else 392 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); 393 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); 394 } 395 396 /** 397 * dce_v11_0_hpd_init - hpd setup callback. 398 * 399 * @adev: amdgpu_device pointer 400 * 401 * Setup the hpd pins used by the card (evergreen+). 402 * Enable the pin, set the polarity, and enable the hpd interrupts. 403 */ 404 static void dce_v11_0_hpd_init(struct amdgpu_device *adev) 405 { 406 struct drm_device *dev = adev->ddev; 407 struct drm_connector *connector; 408 u32 tmp; 409 int idx; 410 411 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 412 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 413 414 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || 415 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 416 /* don't try to enable hpd on eDP or LVDS avoid breaking the 417 * aux dp channel on imac and help (but not completely fix) 418 * https://bugzilla.redhat.com/show_bug.cgi?id=726143 419 * also avoid interrupt storms during dpms. 420 */ 421 continue; 422 } 423 424 switch (amdgpu_connector->hpd.hpd) { 425 case AMDGPU_HPD_1: 426 idx = 0; 427 break; 428 case AMDGPU_HPD_2: 429 idx = 1; 430 break; 431 case AMDGPU_HPD_3: 432 idx = 2; 433 break; 434 case AMDGPU_HPD_4: 435 idx = 3; 436 break; 437 case AMDGPU_HPD_5: 438 idx = 4; 439 break; 440 case AMDGPU_HPD_6: 441 idx = 5; 442 break; 443 default: 444 continue; 445 } 446 447 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); 448 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); 449 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); 450 451 tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); 452 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, 453 DC_HPD_CONNECT_INT_DELAY, 454 AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); 455 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, 456 DC_HPD_DISCONNECT_INT_DELAY, 457 AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); 458 WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); 459 460 dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); 461 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); 462 } 463 } 464 465 /** 466 * dce_v11_0_hpd_fini - hpd tear down callback. 467 * 468 * @adev: amdgpu_device pointer 469 * 470 * Tear down the hpd pins used by the card (evergreen+). 471 * Disable the hpd interrupts. 472 */ 473 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) 474 { 475 struct drm_device *dev = adev->ddev; 476 struct drm_connector *connector; 477 u32 tmp; 478 int idx; 479 480 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 481 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 482 483 switch (amdgpu_connector->hpd.hpd) { 484 case AMDGPU_HPD_1: 485 idx = 0; 486 break; 487 case AMDGPU_HPD_2: 488 idx = 1; 489 break; 490 case AMDGPU_HPD_3: 491 idx = 2; 492 break; 493 case AMDGPU_HPD_4: 494 idx = 3; 495 break; 496 case AMDGPU_HPD_5: 497 idx = 4; 498 break; 499 case AMDGPU_HPD_6: 500 idx = 5; 501 break; 502 default: 503 continue; 504 } 505 506 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); 507 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); 508 WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); 509 510 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); 511 } 512 } 513 514 static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) 515 { 516 return mmDC_GPIO_HPD_A; 517 } 518 519 static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) 520 { 521 u32 crtc_hung = 0; 522 u32 crtc_status[6]; 523 u32 i, j, tmp; 524 525 for (i = 0; i < adev->mode_info.num_crtc; i++) { 526 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); 527 if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { 528 crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); 529 crtc_hung |= (1 << i); 530 } 531 } 532 533 for (j = 0; j < 10; j++) { 534 for (i = 0; i < adev->mode_info.num_crtc; i++) { 535 if (crtc_hung & (1 << i)) { 536 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); 537 if (tmp != crtc_status[i]) 538 crtc_hung &= ~(1 << i); 539 } 540 } 541 if (crtc_hung == 0) 542 return false; 543 udelay(100); 544 } 545 546 return true; 547 } 548 549 static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, 550 struct amdgpu_mode_mc_save *save) 551 { 552 u32 crtc_enabled, tmp; 553 int i; 554 555 save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); 556 save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); 557 558 /* disable VGA render */ 559 tmp = RREG32(mmVGA_RENDER_CONTROL); 560 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 561 WREG32(mmVGA_RENDER_CONTROL, tmp); 562 563 /* blank the display controllers */ 564 for (i = 0; i < adev->mode_info.num_crtc; i++) { 565 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), 566 CRTC_CONTROL, CRTC_MASTER_EN); 567 if (crtc_enabled) { 568 #if 0 569 u32 frame_count; 570 int j; 571 572 save->crtc_enabled[i] = true; 573 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); 574 if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { 575 amdgpu_display_vblank_wait(adev, i); 576 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 577 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); 578 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 579 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); 580 } 581 /* wait for the next frame */ 582 frame_count = amdgpu_display_vblank_get_counter(adev, i); 583 for (j = 0; j < adev->usec_timeout; j++) { 584 if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) 585 break; 586 udelay(1); 587 } 588 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); 589 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { 590 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); 591 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); 592 } 593 tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); 594 if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { 595 tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); 596 WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); 597 } 598 #else 599 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 600 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 601 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); 602 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); 603 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); 604 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); 605 save->crtc_enabled[i] = false; 606 /* ***** */ 607 #endif 608 } else { 609 save->crtc_enabled[i] = false; 610 } 611 } 612 } 613 614 static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, 615 struct amdgpu_mode_mc_save *save) 616 { 617 u32 tmp, frame_count; 618 int i, j; 619 620 /* update crtc base addresses */ 621 for (i = 0; i < adev->mode_info.num_crtc; i++) { 622 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 623 upper_32_bits(adev->mc.vram_start)); 624 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 625 upper_32_bits(adev->mc.vram_start)); 626 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], 627 (u32)adev->mc.vram_start); 628 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], 629 (u32)adev->mc.vram_start); 630 631 if (save->crtc_enabled[i]) { 632 tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]); 633 if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { 634 tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); 635 WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 636 } 637 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); 638 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { 639 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); 640 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); 641 } 642 tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); 643 if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { 644 tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); 645 WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); 646 } 647 for (j = 0; j < adev->usec_timeout; j++) { 648 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); 649 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) 650 break; 651 udelay(1); 652 } 653 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); 654 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); 655 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 656 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 657 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); 658 /* wait for the next frame */ 659 frame_count = amdgpu_display_vblank_get_counter(adev, i); 660 for (j = 0; j < adev->usec_timeout; j++) { 661 if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) 662 break; 663 udelay(1); 664 } 665 } 666 } 667 668 WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); 669 WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); 670 671 /* Unlock vga access */ 672 WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); 673 mdelay(1); 674 WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); 675 } 676 677 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, 678 bool render) 679 { 680 u32 tmp; 681 682 /* Lockout access through VGA aperture*/ 683 tmp = RREG32(mmVGA_HDP_CONTROL); 684 if (render) 685 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); 686 else 687 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 688 WREG32(mmVGA_HDP_CONTROL, tmp); 689 690 /* disable VGA render */ 691 tmp = RREG32(mmVGA_RENDER_CONTROL); 692 if (render) 693 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); 694 else 695 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 696 WREG32(mmVGA_RENDER_CONTROL, tmp); 697 } 698 699 static void dce_v11_0_program_fmt(struct drm_encoder *encoder) 700 { 701 struct drm_device *dev = encoder->dev; 702 struct amdgpu_device *adev = dev->dev_private; 703 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 704 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 705 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 706 int bpc = 0; 707 u32 tmp = 0; 708 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; 709 710 if (connector) { 711 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 712 bpc = amdgpu_connector_get_monitor_bpc(connector); 713 dither = amdgpu_connector->dither; 714 } 715 716 /* LVDS/eDP FMT is set up by atom */ 717 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) 718 return; 719 720 /* not needed for analog */ 721 if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || 722 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) 723 return; 724 725 if (bpc == 0) 726 return; 727 728 switch (bpc) { 729 case 6: 730 if (dither == AMDGPU_FMT_DITHER_ENABLE) { 731 /* XXX sort out optimal dither settings */ 732 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); 733 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); 734 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); 735 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); 736 } else { 737 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); 738 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); 739 } 740 break; 741 case 8: 742 if (dither == AMDGPU_FMT_DITHER_ENABLE) { 743 /* XXX sort out optimal dither settings */ 744 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); 745 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); 746 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); 747 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); 748 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); 749 } else { 750 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); 751 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); 752 } 753 break; 754 case 10: 755 if (dither == AMDGPU_FMT_DITHER_ENABLE) { 756 /* XXX sort out optimal dither settings */ 757 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); 758 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); 759 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); 760 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); 761 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); 762 } else { 763 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); 764 tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); 765 } 766 break; 767 default: 768 /* not needed */ 769 break; 770 } 771 772 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 773 } 774 775 776 /* display watermark setup */ 777 /** 778 * dce_v11_0_line_buffer_adjust - Set up the line buffer 779 * 780 * @adev: amdgpu_device pointer 781 * @amdgpu_crtc: the selected display controller 782 * @mode: the current display mode on the selected display 783 * controller 784 * 785 * Setup up the line buffer allocation for 786 * the selected display controller (CIK). 787 * Returns the line buffer size in pixels. 788 */ 789 static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, 790 struct amdgpu_crtc *amdgpu_crtc, 791 struct drm_display_mode *mode) 792 { 793 u32 tmp, buffer_alloc, i, mem_cfg; 794 u32 pipe_offset = amdgpu_crtc->crtc_id; 795 /* 796 * Line Buffer Setup 797 * There are 6 line buffers, one for each display controllers. 798 * There are 3 partitions per LB. Select the number of partitions 799 * to enable based on the display width. For display widths larger 800 * than 4096, you need use to use 2 display controllers and combine 801 * them using the stereo blender. 802 */ 803 if (amdgpu_crtc->base.enabled && mode) { 804 if (mode->crtc_hdisplay < 1920) { 805 mem_cfg = 1; 806 buffer_alloc = 2; 807 } else if (mode->crtc_hdisplay < 2560) { 808 mem_cfg = 2; 809 buffer_alloc = 2; 810 } else if (mode->crtc_hdisplay < 4096) { 811 mem_cfg = 0; 812 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; 813 } else { 814 DRM_DEBUG_KMS("Mode too big for LB!\n"); 815 mem_cfg = 0; 816 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; 817 } 818 } else { 819 mem_cfg = 1; 820 buffer_alloc = 0; 821 } 822 823 tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); 824 tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); 825 WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); 826 827 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); 828 tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); 829 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); 830 831 for (i = 0; i < adev->usec_timeout; i++) { 832 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); 833 if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) 834 break; 835 udelay(1); 836 } 837 838 if (amdgpu_crtc->base.enabled && mode) { 839 switch (mem_cfg) { 840 case 0: 841 default: 842 return 4096 * 2; 843 case 1: 844 return 1920 * 2; 845 case 2: 846 return 2560 * 2; 847 } 848 } 849 850 /* controller not enabled, so no lb used */ 851 return 0; 852 } 853 854 /** 855 * cik_get_number_of_dram_channels - get the number of dram channels 856 * 857 * @adev: amdgpu_device pointer 858 * 859 * Look up the number of video ram channels (CIK). 860 * Used for display watermark bandwidth calculations 861 * Returns the number of dram channels 862 */ 863 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) 864 { 865 u32 tmp = RREG32(mmMC_SHARED_CHMAP); 866 867 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { 868 case 0: 869 default: 870 return 1; 871 case 1: 872 return 2; 873 case 2: 874 return 4; 875 case 3: 876 return 8; 877 case 4: 878 return 3; 879 case 5: 880 return 6; 881 case 6: 882 return 10; 883 case 7: 884 return 12; 885 case 8: 886 return 16; 887 } 888 } 889 890 struct dce10_wm_params { 891 u32 dram_channels; /* number of dram channels */ 892 u32 yclk; /* bandwidth per dram data pin in kHz */ 893 u32 sclk; /* engine clock in kHz */ 894 u32 disp_clk; /* display clock in kHz */ 895 u32 src_width; /* viewport width */ 896 u32 active_time; /* active display time in ns */ 897 u32 blank_time; /* blank time in ns */ 898 bool interlaced; /* mode is interlaced */ 899 fixed20_12 vsc; /* vertical scale ratio */ 900 u32 num_heads; /* number of active crtcs */ 901 u32 bytes_per_pixel; /* bytes per pixel display + overlay */ 902 u32 lb_size; /* line buffer allocated to pipe */ 903 u32 vtaps; /* vertical scaler taps */ 904 }; 905 906 /** 907 * dce_v11_0_dram_bandwidth - get the dram bandwidth 908 * 909 * @wm: watermark calculation data 910 * 911 * Calculate the raw dram bandwidth (CIK). 912 * Used for display watermark bandwidth calculations 913 * Returns the dram bandwidth in MBytes/s 914 */ 915 static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm) 916 { 917 /* Calculate raw DRAM Bandwidth */ 918 fixed20_12 dram_efficiency; /* 0.7 */ 919 fixed20_12 yclk, dram_channels, bandwidth; 920 fixed20_12 a; 921 922 a.full = dfixed_const(1000); 923 yclk.full = dfixed_const(wm->yclk); 924 yclk.full = dfixed_div(yclk, a); 925 dram_channels.full = dfixed_const(wm->dram_channels * 4); 926 a.full = dfixed_const(10); 927 dram_efficiency.full = dfixed_const(7); 928 dram_efficiency.full = dfixed_div(dram_efficiency, a); 929 bandwidth.full = dfixed_mul(dram_channels, yclk); 930 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); 931 932 return dfixed_trunc(bandwidth); 933 } 934 935 /** 936 * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display 937 * 938 * @wm: watermark calculation data 939 * 940 * Calculate the dram bandwidth used for display (CIK). 941 * Used for display watermark bandwidth calculations 942 * Returns the dram bandwidth for display in MBytes/s 943 */ 944 static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) 945 { 946 /* Calculate DRAM Bandwidth and the part allocated to display. */ 947 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ 948 fixed20_12 yclk, dram_channels, bandwidth; 949 fixed20_12 a; 950 951 a.full = dfixed_const(1000); 952 yclk.full = dfixed_const(wm->yclk); 953 yclk.full = dfixed_div(yclk, a); 954 dram_channels.full = dfixed_const(wm->dram_channels * 4); 955 a.full = dfixed_const(10); 956 disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */ 957 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); 958 bandwidth.full = dfixed_mul(dram_channels, yclk); 959 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); 960 961 return dfixed_trunc(bandwidth); 962 } 963 964 /** 965 * dce_v11_0_data_return_bandwidth - get the data return bandwidth 966 * 967 * @wm: watermark calculation data 968 * 969 * Calculate the data return bandwidth used for display (CIK). 970 * Used for display watermark bandwidth calculations 971 * Returns the data return bandwidth in MBytes/s 972 */ 973 static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm) 974 { 975 /* Calculate the display Data return Bandwidth */ 976 fixed20_12 return_efficiency; /* 0.8 */ 977 fixed20_12 sclk, bandwidth; 978 fixed20_12 a; 979 980 a.full = dfixed_const(1000); 981 sclk.full = dfixed_const(wm->sclk); 982 sclk.full = dfixed_div(sclk, a); 983 a.full = dfixed_const(10); 984 return_efficiency.full = dfixed_const(8); 985 return_efficiency.full = dfixed_div(return_efficiency, a); 986 a.full = dfixed_const(32); 987 bandwidth.full = dfixed_mul(a, sclk); 988 bandwidth.full = dfixed_mul(bandwidth, return_efficiency); 989 990 return dfixed_trunc(bandwidth); 991 } 992 993 /** 994 * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth 995 * 996 * @wm: watermark calculation data 997 * 998 * Calculate the dmif bandwidth used for display (CIK). 999 * Used for display watermark bandwidth calculations 1000 * Returns the dmif bandwidth in MBytes/s 1001 */ 1002 static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm) 1003 { 1004 /* Calculate the DMIF Request Bandwidth */ 1005 fixed20_12 disp_clk_request_efficiency; /* 0.8 */ 1006 fixed20_12 disp_clk, bandwidth; 1007 fixed20_12 a, b; 1008 1009 a.full = dfixed_const(1000); 1010 disp_clk.full = dfixed_const(wm->disp_clk); 1011 disp_clk.full = dfixed_div(disp_clk, a); 1012 a.full = dfixed_const(32); 1013 b.full = dfixed_mul(a, disp_clk); 1014 1015 a.full = dfixed_const(10); 1016 disp_clk_request_efficiency.full = dfixed_const(8); 1017 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); 1018 1019 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); 1020 1021 return dfixed_trunc(bandwidth); 1022 } 1023 1024 /** 1025 * dce_v11_0_available_bandwidth - get the min available bandwidth 1026 * 1027 * @wm: watermark calculation data 1028 * 1029 * Calculate the min available bandwidth used for display (CIK). 1030 * Used for display watermark bandwidth calculations 1031 * Returns the min available bandwidth in MBytes/s 1032 */ 1033 static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm) 1034 { 1035 /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */ 1036 u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm); 1037 u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm); 1038 u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm); 1039 1040 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); 1041 } 1042 1043 /** 1044 * dce_v11_0_average_bandwidth - get the average available bandwidth 1045 * 1046 * @wm: watermark calculation data 1047 * 1048 * Calculate the average available bandwidth used for display (CIK). 1049 * Used for display watermark bandwidth calculations 1050 * Returns the average available bandwidth in MBytes/s 1051 */ 1052 static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm) 1053 { 1054 /* Calculate the display mode Average Bandwidth 1055 * DisplayMode should contain the source and destination dimensions, 1056 * timing, etc. 1057 */ 1058 fixed20_12 bpp; 1059 fixed20_12 line_time; 1060 fixed20_12 src_width; 1061 fixed20_12 bandwidth; 1062 fixed20_12 a; 1063 1064 a.full = dfixed_const(1000); 1065 line_time.full = dfixed_const(wm->active_time + wm->blank_time); 1066 line_time.full = dfixed_div(line_time, a); 1067 bpp.full = dfixed_const(wm->bytes_per_pixel); 1068 src_width.full = dfixed_const(wm->src_width); 1069 bandwidth.full = dfixed_mul(src_width, bpp); 1070 bandwidth.full = dfixed_mul(bandwidth, wm->vsc); 1071 bandwidth.full = dfixed_div(bandwidth, line_time); 1072 1073 return dfixed_trunc(bandwidth); 1074 } 1075 1076 /** 1077 * dce_v11_0_latency_watermark - get the latency watermark 1078 * 1079 * @wm: watermark calculation data 1080 * 1081 * Calculate the latency watermark (CIK). 1082 * Used for display watermark bandwidth calculations 1083 * Returns the latency watermark in ns 1084 */ 1085 static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) 1086 { 1087 /* First calculate the latency in ns */ 1088 u32 mc_latency = 2000; /* 2000 ns. */ 1089 u32 available_bandwidth = dce_v11_0_available_bandwidth(wm); 1090 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; 1091 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; 1092 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ 1093 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + 1094 (wm->num_heads * cursor_line_pair_return_time); 1095 u32 latency = mc_latency + other_heads_data_return_time + dc_latency; 1096 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; 1097 u32 tmp, dmif_size = 12288; 1098 fixed20_12 a, b, c; 1099 1100 if (wm->num_heads == 0) 1101 return 0; 1102 1103 a.full = dfixed_const(2); 1104 b.full = dfixed_const(1); 1105 if ((wm->vsc.full > a.full) || 1106 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || 1107 (wm->vtaps >= 5) || 1108 ((wm->vsc.full >= a.full) && wm->interlaced)) 1109 max_src_lines_per_dst_line = 4; 1110 else 1111 max_src_lines_per_dst_line = 2; 1112 1113 a.full = dfixed_const(available_bandwidth); 1114 b.full = dfixed_const(wm->num_heads); 1115 a.full = dfixed_div(a, b); 1116 1117 b.full = dfixed_const(mc_latency + 512); 1118 c.full = dfixed_const(wm->disp_clk); 1119 b.full = dfixed_div(b, c); 1120 1121 c.full = dfixed_const(dmif_size); 1122 b.full = dfixed_div(c, b); 1123 1124 tmp = min(dfixed_trunc(a), dfixed_trunc(b)); 1125 1126 b.full = dfixed_const(1000); 1127 c.full = dfixed_const(wm->disp_clk); 1128 b.full = dfixed_div(c, b); 1129 c.full = dfixed_const(wm->bytes_per_pixel); 1130 b.full = dfixed_mul(b, c); 1131 1132 lb_fill_bw = min(tmp, dfixed_trunc(b)); 1133 1134 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 1135 b.full = dfixed_const(1000); 1136 c.full = dfixed_const(lb_fill_bw); 1137 b.full = dfixed_div(c, b); 1138 a.full = dfixed_div(a, b); 1139 line_fill_time = dfixed_trunc(a); 1140 1141 if (line_fill_time < wm->active_time) 1142 return latency; 1143 else 1144 return latency + (line_fill_time - wm->active_time); 1145 1146 } 1147 1148 /** 1149 * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check 1150 * average and available dram bandwidth 1151 * 1152 * @wm: watermark calculation data 1153 * 1154 * Check if the display average bandwidth fits in the display 1155 * dram bandwidth (CIK). 1156 * Used for display watermark bandwidth calculations 1157 * Returns true if the display fits, false if not. 1158 */ 1159 static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) 1160 { 1161 if (dce_v11_0_average_bandwidth(wm) <= 1162 (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads)) 1163 return true; 1164 else 1165 return false; 1166 } 1167 1168 /** 1169 * dce_v11_0_average_bandwidth_vs_available_bandwidth - check 1170 * average and available bandwidth 1171 * 1172 * @wm: watermark calculation data 1173 * 1174 * Check if the display average bandwidth fits in the display 1175 * available bandwidth (CIK). 1176 * Used for display watermark bandwidth calculations 1177 * Returns true if the display fits, false if not. 1178 */ 1179 static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) 1180 { 1181 if (dce_v11_0_average_bandwidth(wm) <= 1182 (dce_v11_0_available_bandwidth(wm) / wm->num_heads)) 1183 return true; 1184 else 1185 return false; 1186 } 1187 1188 /** 1189 * dce_v11_0_check_latency_hiding - check latency hiding 1190 * 1191 * @wm: watermark calculation data 1192 * 1193 * Check latency hiding (CIK). 1194 * Used for display watermark bandwidth calculations 1195 * Returns true if the display fits, false if not. 1196 */ 1197 static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm) 1198 { 1199 u32 lb_partitions = wm->lb_size / wm->src_width; 1200 u32 line_time = wm->active_time + wm->blank_time; 1201 u32 latency_tolerant_lines; 1202 u32 latency_hiding; 1203 fixed20_12 a; 1204 1205 a.full = dfixed_const(1); 1206 if (wm->vsc.full > a.full) 1207 latency_tolerant_lines = 1; 1208 else { 1209 if (lb_partitions <= (wm->vtaps + 1)) 1210 latency_tolerant_lines = 1; 1211 else 1212 latency_tolerant_lines = 2; 1213 } 1214 1215 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); 1216 1217 if (dce_v11_0_latency_watermark(wm) <= latency_hiding) 1218 return true; 1219 else 1220 return false; 1221 } 1222 1223 /** 1224 * dce_v11_0_program_watermarks - program display watermarks 1225 * 1226 * @adev: amdgpu_device pointer 1227 * @amdgpu_crtc: the selected display controller 1228 * @lb_size: line buffer size 1229 * @num_heads: number of display controllers in use 1230 * 1231 * Calculate and program the display watermarks for the 1232 * selected display controller (CIK). 1233 */ 1234 static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, 1235 struct amdgpu_crtc *amdgpu_crtc, 1236 u32 lb_size, u32 num_heads) 1237 { 1238 struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 1239 struct dce10_wm_params wm_low, wm_high; 1240 u32 pixel_period; 1241 u32 line_time = 0; 1242 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1243 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1244 1245 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1246 pixel_period = 1000000 / (u32)mode->clock; 1247 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); 1248 1249 /* watermark for high clocks */ 1250 if (adev->pm.dpm_enabled) { 1251 wm_high.yclk = 1252 amdgpu_dpm_get_mclk(adev, false) * 10; 1253 wm_high.sclk = 1254 amdgpu_dpm_get_sclk(adev, false) * 10; 1255 } else { 1256 wm_high.yclk = adev->pm.current_mclk * 10; 1257 wm_high.sclk = adev->pm.current_sclk * 10; 1258 } 1259 1260 wm_high.disp_clk = mode->clock; 1261 wm_high.src_width = mode->crtc_hdisplay; 1262 wm_high.active_time = mode->crtc_hdisplay * pixel_period; 1263 wm_high.blank_time = line_time - wm_high.active_time; 1264 wm_high.interlaced = false; 1265 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1266 wm_high.interlaced = true; 1267 wm_high.vsc = amdgpu_crtc->vsc; 1268 wm_high.vtaps = 1; 1269 if (amdgpu_crtc->rmx_type != RMX_OFF) 1270 wm_high.vtaps = 2; 1271 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1272 wm_high.lb_size = lb_size; 1273 wm_high.dram_channels = cik_get_number_of_dram_channels(adev); 1274 wm_high.num_heads = num_heads; 1275 1276 /* set for high clocks */ 1277 latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535); 1278 1279 /* possibly force display priority to high */ 1280 /* should really do this at mode validation time... */ 1281 if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || 1282 !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) || 1283 !dce_v11_0_check_latency_hiding(&wm_high) || 1284 (adev->mode_info.disp_priority == 2)) { 1285 DRM_DEBUG_KMS("force priority to high\n"); 1286 } 1287 1288 /* watermark for low clocks */ 1289 if (adev->pm.dpm_enabled) { 1290 wm_low.yclk = 1291 amdgpu_dpm_get_mclk(adev, true) * 10; 1292 wm_low.sclk = 1293 amdgpu_dpm_get_sclk(adev, true) * 10; 1294 } else { 1295 wm_low.yclk = adev->pm.current_mclk * 10; 1296 wm_low.sclk = adev->pm.current_sclk * 10; 1297 } 1298 1299 wm_low.disp_clk = mode->clock; 1300 wm_low.src_width = mode->crtc_hdisplay; 1301 wm_low.active_time = mode->crtc_hdisplay * pixel_period; 1302 wm_low.blank_time = line_time - wm_low.active_time; 1303 wm_low.interlaced = false; 1304 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1305 wm_low.interlaced = true; 1306 wm_low.vsc = amdgpu_crtc->vsc; 1307 wm_low.vtaps = 1; 1308 if (amdgpu_crtc->rmx_type != RMX_OFF) 1309 wm_low.vtaps = 2; 1310 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1311 wm_low.lb_size = lb_size; 1312 wm_low.dram_channels = cik_get_number_of_dram_channels(adev); 1313 wm_low.num_heads = num_heads; 1314 1315 /* set for low clocks */ 1316 latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535); 1317 1318 /* possibly force display priority to high */ 1319 /* should really do this at mode validation time... */ 1320 if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || 1321 !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) || 1322 !dce_v11_0_check_latency_hiding(&wm_low) || 1323 (adev->mode_info.disp_priority == 2)) { 1324 DRM_DEBUG_KMS("force priority to high\n"); 1325 } 1326 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); 1327 } 1328 1329 /* select wm A */ 1330 wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); 1331 tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); 1332 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1333 tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); 1334 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); 1335 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); 1336 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1337 /* select wm B */ 1338 tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); 1339 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1340 tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); 1341 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); 1342 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); 1343 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1344 /* restore original selection */ 1345 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); 1346 1347 /* save values for DPM */ 1348 amdgpu_crtc->line_time = line_time; 1349 amdgpu_crtc->wm_high = latency_watermark_a; 1350 amdgpu_crtc->wm_low = latency_watermark_b; 1351 /* Save number of lines the linebuffer leads before the scanout */ 1352 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; 1353 } 1354 1355 /** 1356 * dce_v11_0_bandwidth_update - program display watermarks 1357 * 1358 * @adev: amdgpu_device pointer 1359 * 1360 * Calculate and program the display watermarks and line 1361 * buffer allocation (CIK). 1362 */ 1363 static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev) 1364 { 1365 struct drm_display_mode *mode = NULL; 1366 u32 num_heads = 0, lb_size; 1367 int i; 1368 1369 amdgpu_update_display_priority(adev); 1370 1371 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1372 if (adev->mode_info.crtcs[i]->base.enabled) 1373 num_heads++; 1374 } 1375 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1376 mode = &adev->mode_info.crtcs[i]->base.mode; 1377 lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); 1378 dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i], 1379 lb_size, num_heads); 1380 } 1381 } 1382 1383 static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev) 1384 { 1385 int i; 1386 u32 offset, tmp; 1387 1388 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1389 offset = adev->mode_info.audio.pin[i].offset; 1390 tmp = RREG32_AUDIO_ENDPT(offset, 1391 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); 1392 if (((tmp & 1393 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> 1394 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) 1395 adev->mode_info.audio.pin[i].connected = false; 1396 else 1397 adev->mode_info.audio.pin[i].connected = true; 1398 } 1399 } 1400 1401 static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev) 1402 { 1403 int i; 1404 1405 dce_v11_0_audio_get_connected_pins(adev); 1406 1407 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1408 if (adev->mode_info.audio.pin[i].connected) 1409 return &adev->mode_info.audio.pin[i]; 1410 } 1411 DRM_ERROR("No connected audio pins found!\n"); 1412 return NULL; 1413 } 1414 1415 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) 1416 { 1417 struct amdgpu_device *adev = encoder->dev->dev_private; 1418 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1419 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1420 u32 tmp; 1421 1422 if (!dig || !dig->afmt || !dig->afmt->pin) 1423 return; 1424 1425 tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); 1426 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); 1427 WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); 1428 } 1429 1430 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, 1431 struct drm_display_mode *mode) 1432 { 1433 struct amdgpu_device *adev = encoder->dev->dev_private; 1434 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1435 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1436 struct drm_connector *connector; 1437 struct amdgpu_connector *amdgpu_connector = NULL; 1438 u32 tmp; 1439 int interlace = 0; 1440 1441 if (!dig || !dig->afmt || !dig->afmt->pin) 1442 return; 1443 1444 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 1445 if (connector->encoder == encoder) { 1446 amdgpu_connector = to_amdgpu_connector(connector); 1447 break; 1448 } 1449 } 1450 1451 if (!amdgpu_connector) { 1452 DRM_ERROR("Couldn't find encoder's connector\n"); 1453 return; 1454 } 1455 1456 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1457 interlace = 1; 1458 if (connector->latency_present[interlace]) { 1459 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1460 VIDEO_LIPSYNC, connector->video_latency[interlace]); 1461 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1462 AUDIO_LIPSYNC, connector->audio_latency[interlace]); 1463 } else { 1464 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1465 VIDEO_LIPSYNC, 0); 1466 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1467 AUDIO_LIPSYNC, 0); 1468 } 1469 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1470 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 1471 } 1472 1473 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) 1474 { 1475 struct amdgpu_device *adev = encoder->dev->dev_private; 1476 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1477 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1478 struct drm_connector *connector; 1479 struct amdgpu_connector *amdgpu_connector = NULL; 1480 u32 tmp; 1481 u8 *sadb = NULL; 1482 int sad_count; 1483 1484 if (!dig || !dig->afmt || !dig->afmt->pin) 1485 return; 1486 1487 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 1488 if (connector->encoder == encoder) { 1489 amdgpu_connector = to_amdgpu_connector(connector); 1490 break; 1491 } 1492 } 1493 1494 if (!amdgpu_connector) { 1495 DRM_ERROR("Couldn't find encoder's connector\n"); 1496 return; 1497 } 1498 1499 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); 1500 if (sad_count < 0) { 1501 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 1502 sad_count = 0; 1503 } 1504 1505 /* program the speaker allocation */ 1506 tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1507 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 1508 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1509 DP_CONNECTION, 0); 1510 /* set HDMI mode */ 1511 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1512 HDMI_CONNECTION, 1); 1513 if (sad_count) 1514 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1515 SPEAKER_ALLOCATION, sadb[0]); 1516 else 1517 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1518 SPEAKER_ALLOCATION, 5); /* stereo */ 1519 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1520 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 1521 1522 kfree(sadb); 1523 } 1524 1525 static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) 1526 { 1527 struct amdgpu_device *adev = encoder->dev->dev_private; 1528 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1529 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1530 struct drm_connector *connector; 1531 struct amdgpu_connector *amdgpu_connector = NULL; 1532 struct cea_sad *sads; 1533 int i, sad_count; 1534 1535 static const u16 eld_reg_to_type[][2] = { 1536 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, 1537 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, 1538 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, 1539 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, 1540 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, 1541 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, 1542 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, 1543 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, 1544 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, 1545 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, 1546 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, 1547 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 1548 }; 1549 1550 if (!dig || !dig->afmt || !dig->afmt->pin) 1551 return; 1552 1553 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 1554 if (connector->encoder == encoder) { 1555 amdgpu_connector = to_amdgpu_connector(connector); 1556 break; 1557 } 1558 } 1559 1560 if (!amdgpu_connector) { 1561 DRM_ERROR("Couldn't find encoder's connector\n"); 1562 return; 1563 } 1564 1565 sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); 1566 if (sad_count <= 0) { 1567 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 1568 return; 1569 } 1570 BUG_ON(!sads); 1571 1572 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 1573 u32 tmp = 0; 1574 u8 stereo_freqs = 0; 1575 int max_channels = -1; 1576 int j; 1577 1578 for (j = 0; j < sad_count; j++) { 1579 struct cea_sad *sad = &sads[j]; 1580 1581 if (sad->format == eld_reg_to_type[i][1]) { 1582 if (sad->channels > max_channels) { 1583 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1584 MAX_CHANNELS, sad->channels); 1585 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1586 DESCRIPTOR_BYTE_2, sad->byte2); 1587 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1588 SUPPORTED_FREQUENCIES, sad->freq); 1589 max_channels = sad->channels; 1590 } 1591 1592 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 1593 stereo_freqs |= sad->freq; 1594 else 1595 break; 1596 } 1597 } 1598 1599 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1600 SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); 1601 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); 1602 } 1603 1604 kfree(sads); 1605 } 1606 1607 static void dce_v11_0_audio_enable(struct amdgpu_device *adev, 1608 struct amdgpu_audio_pin *pin, 1609 bool enable) 1610 { 1611 if (!pin) 1612 return; 1613 1614 WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 1615 enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); 1616 } 1617 1618 static const u32 pin_offsets[] = 1619 { 1620 AUD0_REGISTER_OFFSET, 1621 AUD1_REGISTER_OFFSET, 1622 AUD2_REGISTER_OFFSET, 1623 AUD3_REGISTER_OFFSET, 1624 AUD4_REGISTER_OFFSET, 1625 AUD5_REGISTER_OFFSET, 1626 AUD6_REGISTER_OFFSET, 1627 }; 1628 1629 static int dce_v11_0_audio_init(struct amdgpu_device *adev) 1630 { 1631 int i; 1632 1633 if (!amdgpu_audio) 1634 return 0; 1635 1636 adev->mode_info.audio.enabled = true; 1637 1638 adev->mode_info.audio.num_pins = 7; 1639 1640 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1641 adev->mode_info.audio.pin[i].channels = -1; 1642 adev->mode_info.audio.pin[i].rate = -1; 1643 adev->mode_info.audio.pin[i].bits_per_sample = -1; 1644 adev->mode_info.audio.pin[i].status_bits = 0; 1645 adev->mode_info.audio.pin[i].category_code = 0; 1646 adev->mode_info.audio.pin[i].connected = false; 1647 adev->mode_info.audio.pin[i].offset = pin_offsets[i]; 1648 adev->mode_info.audio.pin[i].id = i; 1649 /* disable audio. it will be set up later */ 1650 /* XXX remove once we switch to ip funcs */ 1651 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 1652 } 1653 1654 return 0; 1655 } 1656 1657 static void dce_v11_0_audio_fini(struct amdgpu_device *adev) 1658 { 1659 int i; 1660 1661 if (!adev->mode_info.audio.enabled) 1662 return; 1663 1664 for (i = 0; i < adev->mode_info.audio.num_pins; i++) 1665 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 1666 1667 adev->mode_info.audio.enabled = false; 1668 } 1669 1670 /* 1671 * update the N and CTS parameters for a given pixel clock rate 1672 */ 1673 static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) 1674 { 1675 struct drm_device *dev = encoder->dev; 1676 struct amdgpu_device *adev = dev->dev_private; 1677 struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); 1678 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1679 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1680 u32 tmp; 1681 1682 tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); 1683 tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); 1684 WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); 1685 tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); 1686 tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); 1687 WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); 1688 1689 tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); 1690 tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); 1691 WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); 1692 tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); 1693 tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); 1694 WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); 1695 1696 tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); 1697 tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); 1698 WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); 1699 tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); 1700 tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); 1701 WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); 1702 1703 } 1704 1705 /* 1706 * build a HDMI Video Info Frame 1707 */ 1708 static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, 1709 void *buffer, size_t size) 1710 { 1711 struct drm_device *dev = encoder->dev; 1712 struct amdgpu_device *adev = dev->dev_private; 1713 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1714 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1715 uint8_t *frame = buffer + 3; 1716 uint8_t *header = buffer; 1717 1718 WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, 1719 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 1720 WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, 1721 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); 1722 WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, 1723 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); 1724 WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, 1725 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); 1726 } 1727 1728 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) 1729 { 1730 struct drm_device *dev = encoder->dev; 1731 struct amdgpu_device *adev = dev->dev_private; 1732 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1733 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1734 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1735 u32 dto_phase = 24 * 1000; 1736 u32 dto_modulo = clock; 1737 u32 tmp; 1738 1739 if (!dig || !dig->afmt) 1740 return; 1741 1742 /* XXX two dtos; generally use dto0 for hdmi */ 1743 /* Express [24MHz / target pixel clock] as an exact rational 1744 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 1745 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 1746 */ 1747 tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); 1748 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, 1749 amdgpu_crtc->crtc_id); 1750 WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); 1751 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); 1752 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); 1753 } 1754 1755 /* 1756 * update the info frames with the data from the current display mode 1757 */ 1758 static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, 1759 struct drm_display_mode *mode) 1760 { 1761 struct drm_device *dev = encoder->dev; 1762 struct amdgpu_device *adev = dev->dev_private; 1763 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1764 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1765 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 1766 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 1767 struct hdmi_avi_infoframe frame; 1768 ssize_t err; 1769 u32 tmp; 1770 int bpc = 8; 1771 1772 if (!dig || !dig->afmt) 1773 return; 1774 1775 /* Silent, r600_hdmi_enable will raise WARN for us */ 1776 if (!dig->afmt->enabled) 1777 return; 1778 1779 /* hdmi deep color mode general control packets setup, if bpc > 8 */ 1780 if (encoder->crtc) { 1781 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1782 bpc = amdgpu_crtc->bpc; 1783 } 1784 1785 /* disable audio prior to setting up hw */ 1786 dig->afmt->pin = dce_v11_0_audio_get_pin(adev); 1787 dce_v11_0_audio_enable(adev, dig->afmt->pin, false); 1788 1789 dce_v11_0_audio_set_dto(encoder, mode->clock); 1790 1791 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); 1792 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); 1793 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ 1794 1795 WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); 1796 1797 tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); 1798 switch (bpc) { 1799 case 0: 1800 case 6: 1801 case 8: 1802 case 16: 1803 default: 1804 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); 1805 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); 1806 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", 1807 connector->name, bpc); 1808 break; 1809 case 10: 1810 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); 1811 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); 1812 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", 1813 connector->name); 1814 break; 1815 case 12: 1816 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); 1817 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); 1818 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", 1819 connector->name); 1820 break; 1821 } 1822 WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); 1823 1824 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); 1825 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ 1826 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ 1827 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ 1828 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); 1829 1830 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1831 /* enable audio info frames (frames won't be set until audio is enabled) */ 1832 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); 1833 /* required for audio info values to be updated */ 1834 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); 1835 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1836 1837 tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); 1838 /* required for audio info values to be updated */ 1839 tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); 1840 WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1841 1842 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1843 /* anything other than 0 */ 1844 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); 1845 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1846 1847 WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ 1848 1849 tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1850 /* set the default audio delay */ 1851 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); 1852 /* should be suffient for all audio modes and small enough for all hblanks */ 1853 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); 1854 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1855 1856 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1857 /* allow 60958 channel status fields to be updated */ 1858 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); 1859 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1860 1861 tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); 1862 if (bpc > 8) 1863 /* clear SW CTS value */ 1864 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); 1865 else 1866 /* select SW CTS value */ 1867 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); 1868 /* allow hw to sent ACR packets when required */ 1869 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); 1870 WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); 1871 1872 dce_v11_0_afmt_update_ACR(encoder, mode->clock); 1873 1874 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); 1875 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); 1876 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); 1877 1878 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); 1879 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); 1880 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); 1881 1882 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); 1883 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); 1884 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); 1885 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); 1886 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); 1887 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); 1888 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); 1889 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); 1890 1891 dce_v11_0_audio_write_speaker_allocation(encoder); 1892 1893 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, 1894 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); 1895 1896 dce_v11_0_afmt_audio_select_pin(encoder); 1897 dce_v11_0_audio_write_sad_regs(encoder); 1898 dce_v11_0_audio_write_latency_fields(encoder, mode); 1899 1900 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 1901 if (err < 0) { 1902 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1903 return; 1904 } 1905 1906 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); 1907 if (err < 0) { 1908 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); 1909 return; 1910 } 1911 1912 dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 1913 1914 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1915 /* enable AVI info frames */ 1916 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); 1917 /* required for audio info values to be updated */ 1918 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); 1919 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1920 1921 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1922 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); 1923 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1924 1925 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1926 /* send audio packets */ 1927 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); 1928 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1929 1930 WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); 1931 WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); 1932 WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); 1933 WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); 1934 1935 /* enable audio after to setting up hw */ 1936 dce_v11_0_audio_enable(adev, dig->afmt->pin, true); 1937 } 1938 1939 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1940 { 1941 struct drm_device *dev = encoder->dev; 1942 struct amdgpu_device *adev = dev->dev_private; 1943 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1944 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1945 1946 if (!dig || !dig->afmt) 1947 return; 1948 1949 /* Silent, r600_hdmi_enable will raise WARN for us */ 1950 if (enable && dig->afmt->enabled) 1951 return; 1952 if (!enable && !dig->afmt->enabled) 1953 return; 1954 1955 if (!enable && dig->afmt->pin) { 1956 dce_v11_0_audio_enable(adev, dig->afmt->pin, false); 1957 dig->afmt->pin = NULL; 1958 } 1959 1960 dig->afmt->enabled = enable; 1961 1962 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1963 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1964 } 1965 1966 static void dce_v11_0_afmt_init(struct amdgpu_device *adev) 1967 { 1968 int i; 1969 1970 for (i = 0; i < adev->mode_info.num_dig; i++) 1971 adev->mode_info.afmt[i] = NULL; 1972 1973 /* DCE11 has audio blocks tied to DIG encoders */ 1974 for (i = 0; i < adev->mode_info.num_dig; i++) { 1975 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1976 if (adev->mode_info.afmt[i]) { 1977 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1978 adev->mode_info.afmt[i]->id = i; 1979 } 1980 } 1981 } 1982 1983 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev) 1984 { 1985 int i; 1986 1987 for (i = 0; i < adev->mode_info.num_dig; i++) { 1988 kfree(adev->mode_info.afmt[i]); 1989 adev->mode_info.afmt[i] = NULL; 1990 } 1991 } 1992 1993 static const u32 vga_control_regs[6] = 1994 { 1995 mmD1VGA_CONTROL, 1996 mmD2VGA_CONTROL, 1997 mmD3VGA_CONTROL, 1998 mmD4VGA_CONTROL, 1999 mmD5VGA_CONTROL, 2000 mmD6VGA_CONTROL, 2001 }; 2002 2003 static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) 2004 { 2005 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2006 struct drm_device *dev = crtc->dev; 2007 struct amdgpu_device *adev = dev->dev_private; 2008 u32 vga_control; 2009 2010 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 2011 if (enable) 2012 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 2013 else 2014 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 2015 } 2016 2017 static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) 2018 { 2019 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2020 struct drm_device *dev = crtc->dev; 2021 struct amdgpu_device *adev = dev->dev_private; 2022 2023 if (enable) 2024 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 2025 else 2026 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 2027 } 2028 2029 static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, 2030 struct drm_framebuffer *fb, 2031 int x, int y, int atomic) 2032 { 2033 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2034 struct drm_device *dev = crtc->dev; 2035 struct amdgpu_device *adev = dev->dev_private; 2036 struct amdgpu_framebuffer *amdgpu_fb; 2037 struct drm_framebuffer *target_fb; 2038 struct drm_gem_object *obj; 2039 struct amdgpu_bo *rbo; 2040 uint64_t fb_location, tiling_flags; 2041 uint32_t fb_format, fb_pitch_pixels; 2042 u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); 2043 u32 pipe_config; 2044 u32 tmp, viewport_w, viewport_h; 2045 int r; 2046 bool bypass_lut = false; 2047 2048 /* no fb bound */ 2049 if (!atomic && !crtc->primary->fb) { 2050 DRM_DEBUG_KMS("No FB bound\n"); 2051 return 0; 2052 } 2053 2054 if (atomic) { 2055 amdgpu_fb = to_amdgpu_framebuffer(fb); 2056 target_fb = fb; 2057 } 2058 else { 2059 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2060 target_fb = crtc->primary->fb; 2061 } 2062 2063 /* If atomic, assume fb object is pinned & idle & fenced and 2064 * just update base pointers 2065 */ 2066 obj = amdgpu_fb->obj; 2067 rbo = gem_to_amdgpu_bo(obj); 2068 r = amdgpu_bo_reserve(rbo, false); 2069 if (unlikely(r != 0)) 2070 return r; 2071 2072 if (atomic) 2073 fb_location = amdgpu_bo_gpu_offset(rbo); 2074 else { 2075 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); 2076 if (unlikely(r != 0)) { 2077 amdgpu_bo_unreserve(rbo); 2078 return -EINVAL; 2079 } 2080 } 2081 2082 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); 2083 amdgpu_bo_unreserve(rbo); 2084 2085 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 2086 2087 switch (target_fb->pixel_format) { 2088 case DRM_FORMAT_C8: 2089 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); 2090 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2091 break; 2092 case DRM_FORMAT_XRGB4444: 2093 case DRM_FORMAT_ARGB4444: 2094 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2095 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); 2096 #ifdef __BIG_ENDIAN 2097 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2098 ENDIAN_8IN16); 2099 #endif 2100 break; 2101 case DRM_FORMAT_XRGB1555: 2102 case DRM_FORMAT_ARGB1555: 2103 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2104 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2105 #ifdef __BIG_ENDIAN 2106 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2107 ENDIAN_8IN16); 2108 #endif 2109 break; 2110 case DRM_FORMAT_BGRX5551: 2111 case DRM_FORMAT_BGRA5551: 2112 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2113 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); 2114 #ifdef __BIG_ENDIAN 2115 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2116 ENDIAN_8IN16); 2117 #endif 2118 break; 2119 case DRM_FORMAT_RGB565: 2120 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2121 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 2122 #ifdef __BIG_ENDIAN 2123 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2124 ENDIAN_8IN16); 2125 #endif 2126 break; 2127 case DRM_FORMAT_XRGB8888: 2128 case DRM_FORMAT_ARGB8888: 2129 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2130 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2131 #ifdef __BIG_ENDIAN 2132 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2133 ENDIAN_8IN32); 2134 #endif 2135 break; 2136 case DRM_FORMAT_XRGB2101010: 2137 case DRM_FORMAT_ARGB2101010: 2138 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2139 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 2140 #ifdef __BIG_ENDIAN 2141 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2142 ENDIAN_8IN32); 2143 #endif 2144 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 2145 bypass_lut = true; 2146 break; 2147 case DRM_FORMAT_BGRX1010102: 2148 case DRM_FORMAT_BGRA1010102: 2149 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2150 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); 2151 #ifdef __BIG_ENDIAN 2152 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2153 ENDIAN_8IN32); 2154 #endif 2155 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 2156 bypass_lut = true; 2157 break; 2158 default: 2159 DRM_ERROR("Unsupported screen format %s\n", 2160 drm_get_format_name(target_fb->pixel_format)); 2161 return -EINVAL; 2162 } 2163 2164 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 2165 unsigned bankw, bankh, mtaspect, tile_split, num_banks; 2166 2167 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 2168 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 2169 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 2170 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 2171 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 2172 2173 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); 2174 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2175 ARRAY_2D_TILED_THIN1); 2176 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, 2177 tile_split); 2178 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); 2179 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); 2180 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, 2181 mtaspect); 2182 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, 2183 ADDR_SURF_MICRO_TILING_DISPLAY); 2184 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 2185 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2186 ARRAY_1D_TILED_THIN1); 2187 } 2188 2189 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, 2190 pipe_config); 2191 2192 dce_v11_0_vga_enable(crtc, false); 2193 2194 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2195 upper_32_bits(fb_location)); 2196 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2197 upper_32_bits(fb_location)); 2198 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2199 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 2200 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2201 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); 2202 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2203 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 2204 2205 /* 2206 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 2207 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2208 * retain the full precision throughout the pipeline. 2209 */ 2210 tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); 2211 if (bypass_lut) 2212 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); 2213 else 2214 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); 2215 WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); 2216 2217 if (bypass_lut) 2218 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2219 2220 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2221 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2222 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2223 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2224 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2225 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2226 2227 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 2228 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2229 2230 dce_v11_0_grph_enable(crtc, true); 2231 2232 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2233 target_fb->height); 2234 2235 x &= ~3; 2236 y &= ~1; 2237 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2238 (x << 16) | y); 2239 viewport_w = crtc->mode.hdisplay; 2240 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2241 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2242 (viewport_w << 16) | viewport_h); 2243 2244 /* pageflip setup */ 2245 /* make sure flip is at vb rather than hb */ 2246 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 2247 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 2248 GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); 2249 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2250 2251 /* set pageflip to happen only at start of vblank interval (front porch) */ 2252 WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); 2253 2254 if (!atomic && fb && fb != crtc->primary->fb) { 2255 amdgpu_fb = to_amdgpu_framebuffer(fb); 2256 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2257 r = amdgpu_bo_reserve(rbo, false); 2258 if (unlikely(r != 0)) 2259 return r; 2260 amdgpu_bo_unpin(rbo); 2261 amdgpu_bo_unreserve(rbo); 2262 } 2263 2264 /* Bytes per pixel may have changed */ 2265 dce_v11_0_bandwidth_update(adev); 2266 2267 return 0; 2268 } 2269 2270 static void dce_v11_0_set_interleave(struct drm_crtc *crtc, 2271 struct drm_display_mode *mode) 2272 { 2273 struct drm_device *dev = crtc->dev; 2274 struct amdgpu_device *adev = dev->dev_private; 2275 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2276 u32 tmp; 2277 2278 tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); 2279 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2280 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); 2281 else 2282 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); 2283 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); 2284 } 2285 2286 static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) 2287 { 2288 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2289 struct drm_device *dev = crtc->dev; 2290 struct amdgpu_device *adev = dev->dev_private; 2291 int i; 2292 u32 tmp; 2293 2294 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2295 2296 tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2297 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); 2298 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2299 2300 tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); 2301 tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); 2302 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2303 2304 tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2305 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); 2306 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2307 2308 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2309 2310 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2311 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2312 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2313 2314 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2315 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2316 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2317 2318 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2319 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2320 2321 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2322 for (i = 0; i < 256; i++) { 2323 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2324 (amdgpu_crtc->lut_r[i] << 20) | 2325 (amdgpu_crtc->lut_g[i] << 10) | 2326 (amdgpu_crtc->lut_b[i] << 0)); 2327 } 2328 2329 tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2330 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); 2331 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); 2332 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0); 2333 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2334 2335 tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); 2336 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); 2337 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2338 2339 tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2340 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); 2341 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2342 2343 tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2344 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); 2345 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2346 2347 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 2348 WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); 2349 /* XXX this only needs to be programmed once per crtc at startup, 2350 * not sure where the best place for it is 2351 */ 2352 tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); 2353 tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); 2354 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2355 } 2356 2357 static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) 2358 { 2359 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2360 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2361 2362 switch (amdgpu_encoder->encoder_id) { 2363 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2364 if (dig->linkb) 2365 return 1; 2366 else 2367 return 0; 2368 break; 2369 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2370 if (dig->linkb) 2371 return 3; 2372 else 2373 return 2; 2374 break; 2375 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2376 if (dig->linkb) 2377 return 5; 2378 else 2379 return 4; 2380 break; 2381 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2382 return 6; 2383 break; 2384 default: 2385 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2386 return 0; 2387 } 2388 } 2389 2390 /** 2391 * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc. 2392 * 2393 * @crtc: drm crtc 2394 * 2395 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2396 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2397 * monitors a dedicated PPLL must be used. If a particular board has 2398 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2399 * as there is no need to program the PLL itself. If we are not able to 2400 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2401 * avoid messing up an existing monitor. 2402 * 2403 * Asic specific PLL information 2404 * 2405 * DCE 10.x 2406 * Tonga 2407 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2408 * CI 2409 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2410 * 2411 */ 2412 static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) 2413 { 2414 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2415 struct drm_device *dev = crtc->dev; 2416 struct amdgpu_device *adev = dev->dev_private; 2417 u32 pll_in_use; 2418 int pll; 2419 2420 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2421 if (adev->clock.dp_extclk) 2422 /* skip PPLL programming if using ext clock */ 2423 return ATOM_PPLL_INVALID; 2424 else { 2425 /* use the same PPLL for all DP monitors */ 2426 pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2427 if (pll != ATOM_PPLL_INVALID) 2428 return pll; 2429 } 2430 } else { 2431 /* use the same PPLL for all monitors with the same clock */ 2432 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2433 if (pll != ATOM_PPLL_INVALID) 2434 return pll; 2435 } 2436 2437 /* XXX need to determine what plls are available on each DCE11 part */ 2438 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2439 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) { 2440 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2441 return ATOM_PPLL1; 2442 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2443 return ATOM_PPLL0; 2444 DRM_ERROR("unable to allocate a PPLL\n"); 2445 return ATOM_PPLL_INVALID; 2446 } else { 2447 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2448 return ATOM_PPLL2; 2449 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2450 return ATOM_PPLL1; 2451 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2452 return ATOM_PPLL0; 2453 DRM_ERROR("unable to allocate a PPLL\n"); 2454 return ATOM_PPLL_INVALID; 2455 } 2456 return ATOM_PPLL_INVALID; 2457 } 2458 2459 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2460 { 2461 struct amdgpu_device *adev = crtc->dev->dev_private; 2462 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2463 uint32_t cur_lock; 2464 2465 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2466 if (lock) 2467 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); 2468 else 2469 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); 2470 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2471 } 2472 2473 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) 2474 { 2475 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2476 struct amdgpu_device *adev = crtc->dev->dev_private; 2477 u32 tmp; 2478 2479 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2480 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); 2481 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2482 } 2483 2484 static void dce_v11_0_show_cursor(struct drm_crtc *crtc) 2485 { 2486 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2487 struct amdgpu_device *adev = crtc->dev->dev_private; 2488 u32 tmp; 2489 2490 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2491 upper_32_bits(amdgpu_crtc->cursor_addr)); 2492 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2493 lower_32_bits(amdgpu_crtc->cursor_addr)); 2494 2495 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2496 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); 2497 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); 2498 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2499 } 2500 2501 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, 2502 int x, int y) 2503 { 2504 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2505 struct amdgpu_device *adev = crtc->dev->dev_private; 2506 int xorigin = 0, yorigin = 0; 2507 2508 /* avivo cursor are offset into the total surface */ 2509 x += crtc->x; 2510 y += crtc->y; 2511 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2512 2513 if (x < 0) { 2514 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2515 x = 0; 2516 } 2517 if (y < 0) { 2518 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2519 y = 0; 2520 } 2521 2522 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2523 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2524 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2525 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2526 2527 amdgpu_crtc->cursor_x = x; 2528 amdgpu_crtc->cursor_y = y; 2529 2530 return 0; 2531 } 2532 2533 static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc, 2534 int x, int y) 2535 { 2536 int ret; 2537 2538 dce_v11_0_lock_cursor(crtc, true); 2539 ret = dce_v11_0_cursor_move_locked(crtc, x, y); 2540 dce_v11_0_lock_cursor(crtc, false); 2541 2542 return ret; 2543 } 2544 2545 static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, 2546 struct drm_file *file_priv, 2547 uint32_t handle, 2548 uint32_t width, 2549 uint32_t height, 2550 int32_t hot_x, 2551 int32_t hot_y) 2552 { 2553 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2554 struct drm_gem_object *obj; 2555 struct amdgpu_bo *aobj; 2556 int ret; 2557 2558 if (!handle) { 2559 /* turn off cursor */ 2560 dce_v11_0_hide_cursor(crtc); 2561 obj = NULL; 2562 goto unpin; 2563 } 2564 2565 if ((width > amdgpu_crtc->max_cursor_width) || 2566 (height > amdgpu_crtc->max_cursor_height)) { 2567 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2568 return -EINVAL; 2569 } 2570 2571 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 2572 if (!obj) { 2573 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2574 return -ENOENT; 2575 } 2576 2577 aobj = gem_to_amdgpu_bo(obj); 2578 ret = amdgpu_bo_reserve(aobj, false); 2579 if (ret != 0) { 2580 drm_gem_object_unreference_unlocked(obj); 2581 return ret; 2582 } 2583 2584 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2585 amdgpu_bo_unreserve(aobj); 2586 if (ret) { 2587 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2588 drm_gem_object_unreference_unlocked(obj); 2589 return ret; 2590 } 2591 2592 amdgpu_crtc->cursor_width = width; 2593 amdgpu_crtc->cursor_height = height; 2594 2595 dce_v11_0_lock_cursor(crtc, true); 2596 2597 if (hot_x != amdgpu_crtc->cursor_hot_x || 2598 hot_y != amdgpu_crtc->cursor_hot_y) { 2599 int x, y; 2600 2601 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2602 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2603 2604 dce_v11_0_cursor_move_locked(crtc, x, y); 2605 2606 amdgpu_crtc->cursor_hot_x = hot_x; 2607 amdgpu_crtc->cursor_hot_y = hot_y; 2608 } 2609 2610 dce_v11_0_show_cursor(crtc); 2611 dce_v11_0_lock_cursor(crtc, false); 2612 2613 unpin: 2614 if (amdgpu_crtc->cursor_bo) { 2615 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2616 ret = amdgpu_bo_reserve(aobj, false); 2617 if (likely(ret == 0)) { 2618 amdgpu_bo_unpin(aobj); 2619 amdgpu_bo_unreserve(aobj); 2620 } 2621 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2622 } 2623 2624 amdgpu_crtc->cursor_bo = obj; 2625 return 0; 2626 } 2627 2628 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2629 { 2630 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2631 2632 if (amdgpu_crtc->cursor_bo) { 2633 dce_v11_0_lock_cursor(crtc, true); 2634 2635 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2636 amdgpu_crtc->cursor_y); 2637 2638 dce_v11_0_show_cursor(crtc); 2639 2640 dce_v11_0_lock_cursor(crtc, false); 2641 } 2642 } 2643 2644 static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2645 u16 *blue, uint32_t start, uint32_t size) 2646 { 2647 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2648 int end = (start + size > 256) ? 256 : start + size, i; 2649 2650 /* userspace palettes are always correct as is */ 2651 for (i = start; i < end; i++) { 2652 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2653 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2654 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2655 } 2656 dce_v11_0_crtc_load_lut(crtc); 2657 } 2658 2659 static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) 2660 { 2661 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2662 2663 drm_crtc_cleanup(crtc); 2664 destroy_workqueue(amdgpu_crtc->pflip_queue); 2665 kfree(amdgpu_crtc); 2666 } 2667 2668 static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { 2669 .cursor_set2 = dce_v11_0_crtc_cursor_set2, 2670 .cursor_move = dce_v11_0_crtc_cursor_move, 2671 .gamma_set = dce_v11_0_crtc_gamma_set, 2672 .set_config = amdgpu_crtc_set_config, 2673 .destroy = dce_v11_0_crtc_destroy, 2674 .page_flip = amdgpu_crtc_page_flip, 2675 }; 2676 2677 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2678 { 2679 struct drm_device *dev = crtc->dev; 2680 struct amdgpu_device *adev = dev->dev_private; 2681 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2682 unsigned type; 2683 2684 switch (mode) { 2685 case DRM_MODE_DPMS_ON: 2686 amdgpu_crtc->enabled = true; 2687 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2688 dce_v11_0_vga_enable(crtc, true); 2689 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2690 dce_v11_0_vga_enable(crtc, false); 2691 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2692 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2693 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2694 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2695 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2696 dce_v11_0_crtc_load_lut(crtc); 2697 break; 2698 case DRM_MODE_DPMS_STANDBY: 2699 case DRM_MODE_DPMS_SUSPEND: 2700 case DRM_MODE_DPMS_OFF: 2701 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); 2702 if (amdgpu_crtc->enabled) { 2703 dce_v11_0_vga_enable(crtc, true); 2704 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2705 dce_v11_0_vga_enable(crtc, false); 2706 } 2707 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2708 amdgpu_crtc->enabled = false; 2709 break; 2710 } 2711 /* adjust pm to dpms */ 2712 amdgpu_pm_compute_clocks(adev); 2713 } 2714 2715 static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) 2716 { 2717 /* disable crtc pair power gating before programming */ 2718 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2719 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2720 dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2721 } 2722 2723 static void dce_v11_0_crtc_commit(struct drm_crtc *crtc) 2724 { 2725 dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2726 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2727 } 2728 2729 static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) 2730 { 2731 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2732 struct drm_device *dev = crtc->dev; 2733 struct amdgpu_device *adev = dev->dev_private; 2734 struct amdgpu_atom_ss ss; 2735 int i; 2736 2737 dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2738 if (crtc->primary->fb) { 2739 int r; 2740 struct amdgpu_framebuffer *amdgpu_fb; 2741 struct amdgpu_bo *rbo; 2742 2743 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2744 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2745 r = amdgpu_bo_reserve(rbo, false); 2746 if (unlikely(r)) 2747 DRM_ERROR("failed to reserve rbo before unpin\n"); 2748 else { 2749 amdgpu_bo_unpin(rbo); 2750 amdgpu_bo_unreserve(rbo); 2751 } 2752 } 2753 /* disable the GRPH */ 2754 dce_v11_0_grph_enable(crtc, false); 2755 2756 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2757 2758 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2759 if (adev->mode_info.crtcs[i] && 2760 adev->mode_info.crtcs[i]->enabled && 2761 i != amdgpu_crtc->crtc_id && 2762 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2763 /* one other crtc is using this pll don't turn 2764 * off the pll 2765 */ 2766 goto done; 2767 } 2768 } 2769 2770 switch (amdgpu_crtc->pll_id) { 2771 case ATOM_PPLL0: 2772 case ATOM_PPLL1: 2773 case ATOM_PPLL2: 2774 /* disable the ppll */ 2775 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2776 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2777 break; 2778 default: 2779 break; 2780 } 2781 done: 2782 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2783 amdgpu_crtc->adjusted_clock = 0; 2784 amdgpu_crtc->encoder = NULL; 2785 amdgpu_crtc->connector = NULL; 2786 } 2787 2788 static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, 2789 struct drm_display_mode *mode, 2790 struct drm_display_mode *adjusted_mode, 2791 int x, int y, struct drm_framebuffer *old_fb) 2792 { 2793 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2794 2795 if (!amdgpu_crtc->adjusted_clock) 2796 return -EINVAL; 2797 2798 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2799 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2800 dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2801 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2802 amdgpu_atombios_crtc_scaler_setup(crtc); 2803 dce_v11_0_cursor_reset(crtc); 2804 /* update the hw version fpr dpm */ 2805 amdgpu_crtc->hw_mode = *adjusted_mode; 2806 2807 return 0; 2808 } 2809 2810 static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc, 2811 const struct drm_display_mode *mode, 2812 struct drm_display_mode *adjusted_mode) 2813 { 2814 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2815 struct drm_device *dev = crtc->dev; 2816 struct drm_encoder *encoder; 2817 2818 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2819 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2820 if (encoder->crtc == crtc) { 2821 amdgpu_crtc->encoder = encoder; 2822 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2823 break; 2824 } 2825 } 2826 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2827 amdgpu_crtc->encoder = NULL; 2828 amdgpu_crtc->connector = NULL; 2829 return false; 2830 } 2831 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2832 return false; 2833 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2834 return false; 2835 /* pick pll */ 2836 amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc); 2837 /* if we can't get a PPLL for a non-DP encoder, fail */ 2838 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2839 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2840 return false; 2841 2842 return true; 2843 } 2844 2845 static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2846 struct drm_framebuffer *old_fb) 2847 { 2848 return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2849 } 2850 2851 static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2852 struct drm_framebuffer *fb, 2853 int x, int y, enum mode_set_atomic state) 2854 { 2855 return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); 2856 } 2857 2858 static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { 2859 .dpms = dce_v11_0_crtc_dpms, 2860 .mode_fixup = dce_v11_0_crtc_mode_fixup, 2861 .mode_set = dce_v11_0_crtc_mode_set, 2862 .mode_set_base = dce_v11_0_crtc_set_base, 2863 .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, 2864 .prepare = dce_v11_0_crtc_prepare, 2865 .commit = dce_v11_0_crtc_commit, 2866 .load_lut = dce_v11_0_crtc_load_lut, 2867 .disable = dce_v11_0_crtc_disable, 2868 }; 2869 2870 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) 2871 { 2872 struct amdgpu_crtc *amdgpu_crtc; 2873 int i; 2874 2875 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2876 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2877 if (amdgpu_crtc == NULL) 2878 return -ENOMEM; 2879 2880 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); 2881 2882 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2883 amdgpu_crtc->crtc_id = index; 2884 amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 2885 adev->mode_info.crtcs[index] = amdgpu_crtc; 2886 2887 amdgpu_crtc->max_cursor_width = 128; 2888 amdgpu_crtc->max_cursor_height = 128; 2889 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2890 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2891 2892 for (i = 0; i < 256; i++) { 2893 amdgpu_crtc->lut_r[i] = i << 2; 2894 amdgpu_crtc->lut_g[i] = i << 2; 2895 amdgpu_crtc->lut_b[i] = i << 2; 2896 } 2897 2898 switch (amdgpu_crtc->crtc_id) { 2899 case 0: 2900 default: 2901 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; 2902 break; 2903 case 1: 2904 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; 2905 break; 2906 case 2: 2907 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; 2908 break; 2909 case 3: 2910 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; 2911 break; 2912 case 4: 2913 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; 2914 break; 2915 case 5: 2916 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; 2917 break; 2918 } 2919 2920 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2921 amdgpu_crtc->adjusted_clock = 0; 2922 amdgpu_crtc->encoder = NULL; 2923 amdgpu_crtc->connector = NULL; 2924 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs); 2925 2926 return 0; 2927 } 2928 2929 static int dce_v11_0_early_init(void *handle) 2930 { 2931 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2932 2933 adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; 2934 adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; 2935 2936 dce_v11_0_set_display_funcs(adev); 2937 dce_v11_0_set_irq_funcs(adev); 2938 2939 switch (adev->asic_type) { 2940 case CHIP_CARRIZO: 2941 adev->mode_info.num_crtc = 3; 2942 adev->mode_info.num_hpd = 6; 2943 adev->mode_info.num_dig = 9; 2944 break; 2945 case CHIP_STONEY: 2946 adev->mode_info.num_crtc = 2; 2947 adev->mode_info.num_hpd = 6; 2948 adev->mode_info.num_dig = 9; 2949 break; 2950 default: 2951 /* FIXME: not supported yet */ 2952 return -EINVAL; 2953 } 2954 2955 return 0; 2956 } 2957 2958 static int dce_v11_0_sw_init(void *handle) 2959 { 2960 int r, i; 2961 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2962 2963 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2964 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); 2965 if (r) 2966 return r; 2967 } 2968 2969 for (i = 8; i < 20; i += 2) { 2970 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); 2971 if (r) 2972 return r; 2973 } 2974 2975 /* HPD hotplug */ 2976 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); 2977 if (r) 2978 return r; 2979 2980 adev->mode_info.mode_config_initialized = true; 2981 2982 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; 2983 2984 adev->ddev->mode_config.max_width = 16384; 2985 adev->ddev->mode_config.max_height = 16384; 2986 2987 adev->ddev->mode_config.preferred_depth = 24; 2988 adev->ddev->mode_config.prefer_shadow = 1; 2989 2990 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2991 2992 r = amdgpu_modeset_create_props(adev); 2993 if (r) 2994 return r; 2995 2996 adev->ddev->mode_config.max_width = 16384; 2997 adev->ddev->mode_config.max_height = 16384; 2998 2999 /* allocate crtcs */ 3000 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3001 r = dce_v11_0_crtc_init(adev, i); 3002 if (r) 3003 return r; 3004 } 3005 3006 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 3007 amdgpu_print_display_setup(adev->ddev); 3008 else 3009 return -EINVAL; 3010 3011 /* setup afmt */ 3012 dce_v11_0_afmt_init(adev); 3013 3014 r = dce_v11_0_audio_init(adev); 3015 if (r) 3016 return r; 3017 3018 drm_kms_helper_poll_init(adev->ddev); 3019 3020 return r; 3021 } 3022 3023 static int dce_v11_0_sw_fini(void *handle) 3024 { 3025 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3026 3027 kfree(adev->mode_info.bios_hardcoded_edid); 3028 3029 drm_kms_helper_poll_fini(adev->ddev); 3030 3031 dce_v11_0_audio_fini(adev); 3032 3033 dce_v11_0_afmt_fini(adev); 3034 3035 adev->mode_info.mode_config_initialized = false; 3036 3037 return 0; 3038 } 3039 3040 static int dce_v11_0_hw_init(void *handle) 3041 { 3042 int i; 3043 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3044 3045 dce_v11_0_init_golden_registers(adev); 3046 3047 /* init dig PHYs, disp eng pll */ 3048 amdgpu_atombios_crtc_powergate_init(adev); 3049 amdgpu_atombios_encoder_init_dig(adev); 3050 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 3051 3052 /* initialize hpd */ 3053 dce_v11_0_hpd_init(adev); 3054 3055 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 3056 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3057 } 3058 3059 dce_v11_0_pageflip_interrupt_init(adev); 3060 3061 return 0; 3062 } 3063 3064 static int dce_v11_0_hw_fini(void *handle) 3065 { 3066 int i; 3067 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3068 3069 dce_v11_0_hpd_fini(adev); 3070 3071 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 3072 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3073 } 3074 3075 dce_v11_0_pageflip_interrupt_fini(adev); 3076 3077 return 0; 3078 } 3079 3080 static int dce_v11_0_suspend(void *handle) 3081 { 3082 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3083 3084 amdgpu_atombios_scratch_regs_save(adev); 3085 3086 return dce_v11_0_hw_fini(handle); 3087 } 3088 3089 static int dce_v11_0_resume(void *handle) 3090 { 3091 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3092 int ret; 3093 3094 ret = dce_v11_0_hw_init(handle); 3095 3096 amdgpu_atombios_scratch_regs_restore(adev); 3097 3098 /* turn on the BL */ 3099 if (adev->mode_info.bl_encoder) { 3100 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3101 adev->mode_info.bl_encoder); 3102 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 3103 bl_level); 3104 } 3105 3106 return ret; 3107 } 3108 3109 static bool dce_v11_0_is_idle(void *handle) 3110 { 3111 return true; 3112 } 3113 3114 static int dce_v11_0_wait_for_idle(void *handle) 3115 { 3116 return 0; 3117 } 3118 3119 static void dce_v11_0_print_status(void *handle) 3120 { 3121 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3122 3123 dev_info(adev->dev, "DCE 10.x registers\n"); 3124 /* XXX todo */ 3125 } 3126 3127 static int dce_v11_0_soft_reset(void *handle) 3128 { 3129 u32 srbm_soft_reset = 0, tmp; 3130 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3131 3132 if (dce_v11_0_is_display_hung(adev)) 3133 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 3134 3135 if (srbm_soft_reset) { 3136 dce_v11_0_print_status((void *)adev); 3137 3138 tmp = RREG32(mmSRBM_SOFT_RESET); 3139 tmp |= srbm_soft_reset; 3140 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 3141 WREG32(mmSRBM_SOFT_RESET, tmp); 3142 tmp = RREG32(mmSRBM_SOFT_RESET); 3143 3144 udelay(50); 3145 3146 tmp &= ~srbm_soft_reset; 3147 WREG32(mmSRBM_SOFT_RESET, tmp); 3148 tmp = RREG32(mmSRBM_SOFT_RESET); 3149 3150 /* Wait a little for things to settle down */ 3151 udelay(50); 3152 dce_v11_0_print_status((void *)adev); 3153 } 3154 return 0; 3155 } 3156 3157 static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 3158 int crtc, 3159 enum amdgpu_interrupt_state state) 3160 { 3161 u32 lb_interrupt_mask; 3162 3163 if (crtc >= adev->mode_info.num_crtc) { 3164 DRM_DEBUG("invalid crtc %d\n", crtc); 3165 return; 3166 } 3167 3168 switch (state) { 3169 case AMDGPU_IRQ_STATE_DISABLE: 3170 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3171 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3172 VBLANK_INTERRUPT_MASK, 0); 3173 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3174 break; 3175 case AMDGPU_IRQ_STATE_ENABLE: 3176 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3177 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3178 VBLANK_INTERRUPT_MASK, 1); 3179 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3180 break; 3181 default: 3182 break; 3183 } 3184 } 3185 3186 static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 3187 int crtc, 3188 enum amdgpu_interrupt_state state) 3189 { 3190 u32 lb_interrupt_mask; 3191 3192 if (crtc >= adev->mode_info.num_crtc) { 3193 DRM_DEBUG("invalid crtc %d\n", crtc); 3194 return; 3195 } 3196 3197 switch (state) { 3198 case AMDGPU_IRQ_STATE_DISABLE: 3199 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3200 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3201 VLINE_INTERRUPT_MASK, 0); 3202 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3203 break; 3204 case AMDGPU_IRQ_STATE_ENABLE: 3205 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3206 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3207 VLINE_INTERRUPT_MASK, 1); 3208 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3209 break; 3210 default: 3211 break; 3212 } 3213 } 3214 3215 static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, 3216 struct amdgpu_irq_src *source, 3217 unsigned hpd, 3218 enum amdgpu_interrupt_state state) 3219 { 3220 u32 tmp; 3221 3222 if (hpd >= adev->mode_info.num_hpd) { 3223 DRM_DEBUG("invalid hdp %d\n", hpd); 3224 return 0; 3225 } 3226 3227 switch (state) { 3228 case AMDGPU_IRQ_STATE_DISABLE: 3229 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3230 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); 3231 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3232 break; 3233 case AMDGPU_IRQ_STATE_ENABLE: 3234 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3235 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); 3236 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3237 break; 3238 default: 3239 break; 3240 } 3241 3242 return 0; 3243 } 3244 3245 static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev, 3246 struct amdgpu_irq_src *source, 3247 unsigned type, 3248 enum amdgpu_interrupt_state state) 3249 { 3250 switch (type) { 3251 case AMDGPU_CRTC_IRQ_VBLANK1: 3252 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3253 break; 3254 case AMDGPU_CRTC_IRQ_VBLANK2: 3255 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3256 break; 3257 case AMDGPU_CRTC_IRQ_VBLANK3: 3258 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3259 break; 3260 case AMDGPU_CRTC_IRQ_VBLANK4: 3261 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3262 break; 3263 case AMDGPU_CRTC_IRQ_VBLANK5: 3264 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3265 break; 3266 case AMDGPU_CRTC_IRQ_VBLANK6: 3267 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3268 break; 3269 case AMDGPU_CRTC_IRQ_VLINE1: 3270 dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state); 3271 break; 3272 case AMDGPU_CRTC_IRQ_VLINE2: 3273 dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state); 3274 break; 3275 case AMDGPU_CRTC_IRQ_VLINE3: 3276 dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state); 3277 break; 3278 case AMDGPU_CRTC_IRQ_VLINE4: 3279 dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state); 3280 break; 3281 case AMDGPU_CRTC_IRQ_VLINE5: 3282 dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state); 3283 break; 3284 case AMDGPU_CRTC_IRQ_VLINE6: 3285 dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state); 3286 break; 3287 default: 3288 break; 3289 } 3290 return 0; 3291 } 3292 3293 static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev, 3294 struct amdgpu_irq_src *src, 3295 unsigned type, 3296 enum amdgpu_interrupt_state state) 3297 { 3298 u32 reg; 3299 3300 if (type >= adev->mode_info.num_crtc) { 3301 DRM_ERROR("invalid pageflip crtc %d\n", type); 3302 return -EINVAL; 3303 } 3304 3305 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3306 if (state == AMDGPU_IRQ_STATE_DISABLE) 3307 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3308 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3309 else 3310 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3311 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3312 3313 return 0; 3314 } 3315 3316 static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, 3317 struct amdgpu_irq_src *source, 3318 struct amdgpu_iv_entry *entry) 3319 { 3320 unsigned long flags; 3321 unsigned crtc_id; 3322 struct amdgpu_crtc *amdgpu_crtc; 3323 struct amdgpu_flip_work *works; 3324 3325 crtc_id = (entry->src_id - 8) >> 1; 3326 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3327 3328 if (crtc_id >= adev->mode_info.num_crtc) { 3329 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3330 return -EINVAL; 3331 } 3332 3333 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3334 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3335 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3336 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3337 3338 /* IRQ could occur when in initial stage */ 3339 if(amdgpu_crtc == NULL) 3340 return 0; 3341 3342 spin_lock_irqsave(&adev->ddev->event_lock, flags); 3343 works = amdgpu_crtc->pflip_works; 3344 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3345 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3346 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3347 amdgpu_crtc->pflip_status, 3348 AMDGPU_FLIP_SUBMITTED); 3349 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3350 return 0; 3351 } 3352 3353 /* page flip completed. clean up */ 3354 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3355 amdgpu_crtc->pflip_works = NULL; 3356 3357 /* wakeup usersapce */ 3358 if(works->event) 3359 drm_send_vblank_event(adev->ddev, crtc_id, works->event); 3360 3361 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3362 3363 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3364 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3365 3366 return 0; 3367 } 3368 3369 static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, 3370 int hpd) 3371 { 3372 u32 tmp; 3373 3374 if (hpd >= adev->mode_info.num_hpd) { 3375 DRM_DEBUG("invalid hdp %d\n", hpd); 3376 return; 3377 } 3378 3379 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3380 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); 3381 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3382 } 3383 3384 static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev, 3385 int crtc) 3386 { 3387 u32 tmp; 3388 3389 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { 3390 DRM_DEBUG("invalid crtc %d\n", crtc); 3391 return; 3392 } 3393 3394 tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); 3395 tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); 3396 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); 3397 } 3398 3399 static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev, 3400 int crtc) 3401 { 3402 u32 tmp; 3403 3404 if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { 3405 DRM_DEBUG("invalid crtc %d\n", crtc); 3406 return; 3407 } 3408 3409 tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); 3410 tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); 3411 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); 3412 } 3413 3414 static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, 3415 struct amdgpu_irq_src *source, 3416 struct amdgpu_iv_entry *entry) 3417 { 3418 unsigned crtc = entry->src_id - 1; 3419 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3420 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3421 3422 switch (entry->src_data) { 3423 case 0: /* vblank */ 3424 if (disp_int & interrupt_status_offsets[crtc].vblank) 3425 dce_v11_0_crtc_vblank_int_ack(adev, crtc); 3426 else 3427 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3428 3429 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3430 drm_handle_vblank(adev->ddev, crtc); 3431 } 3432 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3433 3434 break; 3435 case 1: /* vline */ 3436 if (disp_int & interrupt_status_offsets[crtc].vline) 3437 dce_v11_0_crtc_vline_int_ack(adev, crtc); 3438 else 3439 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3440 3441 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3442 3443 break; 3444 default: 3445 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3446 break; 3447 } 3448 3449 return 0; 3450 } 3451 3452 static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, 3453 struct amdgpu_irq_src *source, 3454 struct amdgpu_iv_entry *entry) 3455 { 3456 uint32_t disp_int, mask; 3457 unsigned hpd; 3458 3459 if (entry->src_data >= adev->mode_info.num_hpd) { 3460 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); 3461 return 0; 3462 } 3463 3464 hpd = entry->src_data; 3465 disp_int = RREG32(interrupt_status_offsets[hpd].reg); 3466 mask = interrupt_status_offsets[hpd].hpd; 3467 3468 if (disp_int & mask) { 3469 dce_v11_0_hpd_int_ack(adev, hpd); 3470 schedule_work(&adev->hotplug_work); 3471 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3472 } 3473 3474 return 0; 3475 } 3476 3477 static int dce_v11_0_set_clockgating_state(void *handle, 3478 enum amd_clockgating_state state) 3479 { 3480 return 0; 3481 } 3482 3483 static int dce_v11_0_set_powergating_state(void *handle, 3484 enum amd_powergating_state state) 3485 { 3486 return 0; 3487 } 3488 3489 const struct amd_ip_funcs dce_v11_0_ip_funcs = { 3490 .early_init = dce_v11_0_early_init, 3491 .late_init = NULL, 3492 .sw_init = dce_v11_0_sw_init, 3493 .sw_fini = dce_v11_0_sw_fini, 3494 .hw_init = dce_v11_0_hw_init, 3495 .hw_fini = dce_v11_0_hw_fini, 3496 .suspend = dce_v11_0_suspend, 3497 .resume = dce_v11_0_resume, 3498 .is_idle = dce_v11_0_is_idle, 3499 .wait_for_idle = dce_v11_0_wait_for_idle, 3500 .soft_reset = dce_v11_0_soft_reset, 3501 .print_status = dce_v11_0_print_status, 3502 .set_clockgating_state = dce_v11_0_set_clockgating_state, 3503 .set_powergating_state = dce_v11_0_set_powergating_state, 3504 }; 3505 3506 static void 3507 dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, 3508 struct drm_display_mode *mode, 3509 struct drm_display_mode *adjusted_mode) 3510 { 3511 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3512 3513 amdgpu_encoder->pixel_clock = adjusted_mode->clock; 3514 3515 /* need to call this here rather than in prepare() since we need some crtc info */ 3516 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3517 3518 /* set scaler clears this on some chips */ 3519 dce_v11_0_set_interleave(encoder->crtc, mode); 3520 3521 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 3522 dce_v11_0_afmt_enable(encoder, true); 3523 dce_v11_0_afmt_setmode(encoder, adjusted_mode); 3524 } 3525 } 3526 3527 static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) 3528 { 3529 struct amdgpu_device *adev = encoder->dev->dev_private; 3530 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3531 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3532 3533 if ((amdgpu_encoder->active_device & 3534 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3535 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3536 ENCODER_OBJECT_ID_NONE)) { 3537 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3538 if (dig) { 3539 dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder); 3540 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) 3541 dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3542 } 3543 } 3544 3545 amdgpu_atombios_scratch_regs_lock(adev, true); 3546 3547 if (connector) { 3548 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3549 3550 /* select the clock/data port if it uses a router */ 3551 if (amdgpu_connector->router.cd_valid) 3552 amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3553 3554 /* turn eDP panel on for mode set */ 3555 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3556 amdgpu_atombios_encoder_set_edp_panel_power(connector, 3557 ATOM_TRANSMITTER_ACTION_POWER_ON); 3558 } 3559 3560 /* this is needed for the pll/ss setup to work correctly in some cases */ 3561 amdgpu_atombios_encoder_set_crtc_source(encoder); 3562 /* set up the FMT blocks */ 3563 dce_v11_0_program_fmt(encoder); 3564 } 3565 3566 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) 3567 { 3568 struct drm_device *dev = encoder->dev; 3569 struct amdgpu_device *adev = dev->dev_private; 3570 3571 /* need to call this here as we need the crtc set up */ 3572 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3573 amdgpu_atombios_scratch_regs_lock(adev, false); 3574 } 3575 3576 static void dce_v11_0_encoder_disable(struct drm_encoder *encoder) 3577 { 3578 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3579 struct amdgpu_encoder_atom_dig *dig; 3580 3581 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3582 3583 if (amdgpu_atombios_encoder_is_digital(encoder)) { 3584 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 3585 dce_v11_0_afmt_enable(encoder, false); 3586 dig = amdgpu_encoder->enc_priv; 3587 dig->dig_encoder = -1; 3588 } 3589 amdgpu_encoder->active_device = 0; 3590 } 3591 3592 /* these are handled by the primary encoders */ 3593 static void dce_v11_0_ext_prepare(struct drm_encoder *encoder) 3594 { 3595 3596 } 3597 3598 static void dce_v11_0_ext_commit(struct drm_encoder *encoder) 3599 { 3600 3601 } 3602 3603 static void 3604 dce_v11_0_ext_mode_set(struct drm_encoder *encoder, 3605 struct drm_display_mode *mode, 3606 struct drm_display_mode *adjusted_mode) 3607 { 3608 3609 } 3610 3611 static void dce_v11_0_ext_disable(struct drm_encoder *encoder) 3612 { 3613 3614 } 3615 3616 static void 3617 dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) 3618 { 3619 3620 } 3621 3622 static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder, 3623 const struct drm_display_mode *mode, 3624 struct drm_display_mode *adjusted_mode) 3625 { 3626 return true; 3627 } 3628 3629 static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { 3630 .dpms = dce_v11_0_ext_dpms, 3631 .mode_fixup = dce_v11_0_ext_mode_fixup, 3632 .prepare = dce_v11_0_ext_prepare, 3633 .mode_set = dce_v11_0_ext_mode_set, 3634 .commit = dce_v11_0_ext_commit, 3635 .disable = dce_v11_0_ext_disable, 3636 /* no detect for TMDS/LVDS yet */ 3637 }; 3638 3639 static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = { 3640 .dpms = amdgpu_atombios_encoder_dpms, 3641 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3642 .prepare = dce_v11_0_encoder_prepare, 3643 .mode_set = dce_v11_0_encoder_mode_set, 3644 .commit = dce_v11_0_encoder_commit, 3645 .disable = dce_v11_0_encoder_disable, 3646 .detect = amdgpu_atombios_encoder_dig_detect, 3647 }; 3648 3649 static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = { 3650 .dpms = amdgpu_atombios_encoder_dpms, 3651 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3652 .prepare = dce_v11_0_encoder_prepare, 3653 .mode_set = dce_v11_0_encoder_mode_set, 3654 .commit = dce_v11_0_encoder_commit, 3655 .detect = amdgpu_atombios_encoder_dac_detect, 3656 }; 3657 3658 static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder) 3659 { 3660 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3661 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3662 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); 3663 kfree(amdgpu_encoder->enc_priv); 3664 drm_encoder_cleanup(encoder); 3665 kfree(amdgpu_encoder); 3666 } 3667 3668 static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = { 3669 .destroy = dce_v11_0_encoder_destroy, 3670 }; 3671 3672 static void dce_v11_0_encoder_add(struct amdgpu_device *adev, 3673 uint32_t encoder_enum, 3674 uint32_t supported_device, 3675 u16 caps) 3676 { 3677 struct drm_device *dev = adev->ddev; 3678 struct drm_encoder *encoder; 3679 struct amdgpu_encoder *amdgpu_encoder; 3680 3681 /* see if we already added it */ 3682 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3683 amdgpu_encoder = to_amdgpu_encoder(encoder); 3684 if (amdgpu_encoder->encoder_enum == encoder_enum) { 3685 amdgpu_encoder->devices |= supported_device; 3686 return; 3687 } 3688 3689 } 3690 3691 /* add a new one */ 3692 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); 3693 if (!amdgpu_encoder) 3694 return; 3695 3696 encoder = &amdgpu_encoder->base; 3697 switch (adev->mode_info.num_crtc) { 3698 case 1: 3699 encoder->possible_crtcs = 0x1; 3700 break; 3701 case 2: 3702 default: 3703 encoder->possible_crtcs = 0x3; 3704 break; 3705 case 4: 3706 encoder->possible_crtcs = 0xf; 3707 break; 3708 case 6: 3709 encoder->possible_crtcs = 0x3f; 3710 break; 3711 } 3712 3713 amdgpu_encoder->enc_priv = NULL; 3714 3715 amdgpu_encoder->encoder_enum = encoder_enum; 3716 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 3717 amdgpu_encoder->devices = supported_device; 3718 amdgpu_encoder->rmx_type = RMX_OFF; 3719 amdgpu_encoder->underscan_type = UNDERSCAN_OFF; 3720 amdgpu_encoder->is_ext_encoder = false; 3721 amdgpu_encoder->caps = caps; 3722 3723 switch (amdgpu_encoder->encoder_id) { 3724 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3725 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3726 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3727 DRM_MODE_ENCODER_DAC, NULL); 3728 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); 3729 break; 3730 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3731 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 3732 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 3733 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 3734 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 3735 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3736 amdgpu_encoder->rmx_type = RMX_FULL; 3737 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3738 DRM_MODE_ENCODER_LVDS, NULL); 3739 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3740 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3741 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3742 DRM_MODE_ENCODER_DAC, NULL); 3743 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3744 } else { 3745 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3746 DRM_MODE_ENCODER_TMDS, NULL); 3747 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3748 } 3749 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); 3750 break; 3751 case ENCODER_OBJECT_ID_SI170B: 3752 case ENCODER_OBJECT_ID_CH7303: 3753 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: 3754 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: 3755 case ENCODER_OBJECT_ID_TITFP513: 3756 case ENCODER_OBJECT_ID_VT1623: 3757 case ENCODER_OBJECT_ID_HDMI_SI1930: 3758 case ENCODER_OBJECT_ID_TRAVIS: 3759 case ENCODER_OBJECT_ID_NUTMEG: 3760 /* these are handled by the primary encoders */ 3761 amdgpu_encoder->is_ext_encoder = true; 3762 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3763 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3764 DRM_MODE_ENCODER_LVDS, NULL); 3765 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3766 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3767 DRM_MODE_ENCODER_DAC, NULL); 3768 else 3769 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3770 DRM_MODE_ENCODER_TMDS, NULL); 3771 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); 3772 break; 3773 } 3774 } 3775 3776 static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { 3777 .set_vga_render_state = &dce_v11_0_set_vga_render_state, 3778 .bandwidth_update = &dce_v11_0_bandwidth_update, 3779 .vblank_get_counter = &dce_v11_0_vblank_get_counter, 3780 .vblank_wait = &dce_v11_0_vblank_wait, 3781 .is_display_hung = &dce_v11_0_is_display_hung, 3782 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3783 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3784 .hpd_sense = &dce_v11_0_hpd_sense, 3785 .hpd_set_polarity = &dce_v11_0_hpd_set_polarity, 3786 .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg, 3787 .page_flip = &dce_v11_0_page_flip, 3788 .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, 3789 .add_encoder = &dce_v11_0_encoder_add, 3790 .add_connector = &amdgpu_connector_add, 3791 .stop_mc_access = &dce_v11_0_stop_mc_access, 3792 .resume_mc_access = &dce_v11_0_resume_mc_access, 3793 }; 3794 3795 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) 3796 { 3797 if (adev->mode_info.funcs == NULL) 3798 adev->mode_info.funcs = &dce_v11_0_display_funcs; 3799 } 3800 3801 static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { 3802 .set = dce_v11_0_set_crtc_irq_state, 3803 .process = dce_v11_0_crtc_irq, 3804 }; 3805 3806 static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = { 3807 .set = dce_v11_0_set_pageflip_irq_state, 3808 .process = dce_v11_0_pageflip_irq, 3809 }; 3810 3811 static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = { 3812 .set = dce_v11_0_set_hpd_irq_state, 3813 .process = dce_v11_0_hpd_irq, 3814 }; 3815 3816 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) 3817 { 3818 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; 3819 adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs; 3820 3821 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; 3822 adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs; 3823 3824 adev->hpd_irq.num_types = AMDGPU_HPD_LAST; 3825 adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; 3826 } 3827