/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "vid.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_10_0_enum.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static const u32 golden_settings_tonga_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static const u32 golden_settings_fiji_a10[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	default:
		break;
	}
}

static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
				       u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
		return true;
	else
		return false;
}

static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
	u32 pos1, pos2;

	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	if (pos1 != pos2)
		return true;
	else
		return false;
}

/**
 * dce_v10_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
	unsigned i = 0;

	if (crtc >= adev->mode_info.num_crtc)
		return;

	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
		return;

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (dce_v10_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v10_0_is_counter_moving(adev, crtc))
				break;
		}
	}

	while (!dce_v10_0_is_in_vblank(adev, crtc)) {
		if (i++ % 100 == 0) {
			if (!dce_v10_0_is_counter_moving(adev, crtc))
				break;
		}
	}
}

static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v10_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
				int crtc_id, u64 crtc_base)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

	/* update the primary scanout address */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					 u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v10_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
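 * The DC_HPD_SENSE status bit read below reflects the current state
 * of the hpd pin, so this reports live connectivity.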
 */
static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev,
				enum amdgpu_hpd_id hpd)
{
	int idx;
	bool connected = false;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
		return connected;
	}

	if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) &
	    DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v10_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
				       enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v10_0_hpd_sense(adev, hpd);
	int idx;

	switch (hpd) {
	case AMDGPU_HPD_1:
		idx = 0;
		break;
	case AMDGPU_HPD_2:
		idx = 1;
		break;
	case AMDGPU_HPD_3:
		idx = 2;
		break;
	case AMDGPU_HPD_4:
		idx = 3;
		break;
	case AMDGPU_HPD_5:
		idx = 4;
		break;
	case AMDGPU_HPD_6:
		idx = 5;
		break;
	default:
		return;
	}

	tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]);
	if (connected)
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
	else
		tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
	WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp);
}

/**
 * dce_v10_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking
			 * the aux dp channel on imac and help (but not completely
			 * fix) https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			continue;
		}

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp);

		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v10_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;
	int idx;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		switch (amdgpu_connector->hpd.hpd) {
		case AMDGPU_HPD_1:
			idx = 0;
			break;
		case AMDGPU_HPD_2:
			idx = 1;
			break;
		case AMDGPU_HPD_3:
			idx = 2;
			break;
		case AMDGPU_HPD_4:
			idx = 3;
			break;
		case AMDGPU_HPD_5:
			idx = 4;
			break;
		case AMDGPU_HPD_6:
			idx = 5;
			break;
		default:
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
				     struct amdgpu_mode_mc_save *save)
{
	u32 crtc_enabled, tmp;
	int i;

	save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);

	/* blank the display controllers */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
					     CRTC_CONTROL, CRTC_MASTER_EN);
		if (crtc_enabled) {
#if 0
			u32 frame_count;
			int j;

			save->crtc_enabled[i] = true;
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
				amdgpu_display_vblank_wait(adev, i);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
				WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
#else
			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
			WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			save->crtc_enabled[i] = false;
			/* ***** */
#endif
		} else {
			save->crtc_enabled[i] = false;
		}
	}
}

static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
				       struct amdgpu_mode_mc_save *save)
{
	u32 tmp, frame_count;
	int i, j;

	/* update crtc base addresses */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
		       upper_32_bits(adev->mc.vram_start));
		WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);
		WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
		       (u32)adev->mc.vram_start);

		if (save->crtc_enabled[i]) {
			tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
				WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
				WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
			}
			tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
			if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
				tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
				WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
			}
			for (j = 0; j < adev->usec_timeout; j++) {
				tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
				if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
					break;
				udelay(1);
			}
			tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
			tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
			WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
			WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			/* wait for the next frame */
			frame_count = amdgpu_display_vblank_get_counter(adev, i);
			for (j = 0; j < adev->usec_timeout; j++) {
				if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
					break;
				udelay(1);
			}
		}
	}

	WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
	WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));

	/* Unlock vga access */
	WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
}

static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
					   bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture*/
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v10_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i, mem_cfg;
	u32 pipe_offset = amdgpu_crtc->crtc_id;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width. For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			mem_cfg = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			mem_cfg = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		mem_cfg = 1;
		buffer_alloc = 0;
	}

	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (mem_cfg) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v10_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
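 * Equivalent plain-math form of the fixed-point calculation below:
 *   bandwidth (MB/s) = (yclk / 1000) * (dram_channels * 4) * 0.7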
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
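 * Equivalent plain-math form of the fixed-point calculation below:
 *   bandwidth (MB/s) = 32 * (disp_clk / 1000) * 0.8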
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v10_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
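	/*
	 * The total latency accumulated below is:
	 *   latency = mc_latency + other_heads_data_return_time + dc_latency
	 */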
	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(mc_latency + 512);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(b, c);

	c.full = dfixed_const(dmif_size);
	b.full = dfixed_div(c, b);

	tmp = min(dfixed_trunc(a), dfixed_trunc(b));

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(tmp, dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
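 * Equivalent plain-math form of the check below:
 *   fits if latency_watermark <= latency_tolerant_lines * line_time + blank_time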
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v10_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
					 struct amdgpu_crtc *amdgpu_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce10_wm_params wm_low, wm_high;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = mode->crtc_hdisplay * pixel_period;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
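		/* note: the failing checks below currently only emit a debug message */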
		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v10_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = mode->crtc_hdisplay * pixel_period;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v10_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
}

/**
 * dce_v10_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
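 * All enabled heads are counted first because the average bandwidth
 * checks in dce_v10_0_program_watermarks() divide by num_heads.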
 */
static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_update_display_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					     lb_size, num_heads);
	}
}

static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		      AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		     AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v10_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
						 struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	int interlace = 0;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;
	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);
	/* set HDMI mode */
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 1);
	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
				   struct amdgpu_audio_pin *pin,
				   bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
};

static int dce_v10_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = 7;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
						void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
to_amdgpu_crtc(encoder->crtc); 1742 u32 dto_phase = 24 * 1000; 1743 u32 dto_modulo = clock; 1744 u32 tmp; 1745 1746 if (!dig || !dig->afmt) 1747 return; 1748 1749 /* XXX two dtos; generally use dto0 for hdmi */ 1750 /* Express [24MHz / target pixel clock] as an exact rational 1751 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE 1752 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 1753 */ 1754 tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); 1755 tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, 1756 amdgpu_crtc->crtc_id); 1757 WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); 1758 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); 1759 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); 1760 } 1761 1762 /* 1763 * update the info frames with the data from the current display mode 1764 */ 1765 static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, 1766 struct drm_display_mode *mode) 1767 { 1768 struct drm_device *dev = encoder->dev; 1769 struct amdgpu_device *adev = dev->dev_private; 1770 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1771 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1772 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 1773 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 1774 struct hdmi_avi_infoframe frame; 1775 ssize_t err; 1776 u32 tmp; 1777 int bpc = 8; 1778 1779 if (!dig || !dig->afmt) 1780 return; 1781 1782 /* Silent, r600_hdmi_enable will raise WARN for us */ 1783 if (!dig->afmt->enabled) 1784 return; 1785 1786 /* hdmi deep color mode general control packets setup, if bpc > 8 */ 1787 if (encoder->crtc) { 1788 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1789 bpc = amdgpu_crtc->bpc; 1790 } 1791 1792 /* disable audio prior to setting up hw */ 1793 dig->afmt->pin = dce_v10_0_audio_get_pin(adev); 1794 dce_v10_0_audio_enable(adev, dig->afmt->pin, false); 1795 1796 dce_v10_0_audio_set_dto(encoder, mode->clock); 1797 1798 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); 1799 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); 1800 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ 1801 1802 WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); 1803 1804 tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); 1805 switch (bpc) { 1806 case 0: 1807 case 6: 1808 case 8: 1809 case 16: 1810 default: 1811 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); 1812 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); 1813 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", 1814 connector->name, bpc); 1815 break; 1816 case 10: 1817 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); 1818 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); 1819 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", 1820 connector->name); 1821 break; 1822 case 12: 1823 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); 1824 tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); 1825 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", 1826 connector->name); 1827 break; 1828 } 1829 WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); 1830 1831 tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); 1832 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ 1833 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send
general control packets */ 1834 tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ 1835 WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); 1836 1837 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1838 /* enable audio info frames (frames won't be set until audio is enabled) */ 1839 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); 1840 /* required for audio info values to be updated */ 1841 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); 1842 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1843 1844 tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); 1845 /* required for audio info values to be updated */ 1846 tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); 1847 WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1848 1849 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1850 /* anything other than 0 */ 1851 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); 1852 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1853 1854 WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ 1855 1856 tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1857 /* set the default audio delay */ 1858 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); 1859 /* should be sufficient for all audio modes and small enough for all hblanks */ 1860 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); 1861 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1862 1863 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1864 /* allow 60958 channel status fields to be updated */ 1865 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); 1866 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1867 1868 tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); 1869 if (bpc > 8) 1870 /* clear SW CTS value */ 1871 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); 1872 else 1873 /* select SW CTS value */ 1874 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); 1875 /* allow hw to send ACR packets when required */ 1876 tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); 1877 WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); 1878 1879 dce_v10_0_afmt_update_ACR(encoder, mode->clock); 1880 1881 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); 1882 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); 1883 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); 1884 1885 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); 1886 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); 1887 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); 1888 1889 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); 1890 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); 1891 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); 1892 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); 1893 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); 1894 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); 1895 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); 1896 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); 1897 1898
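/* with the packet plumbing configured above, push the EDID-derived audio
 * parameters next: the speaker allocation from the connector's ELD, a full
 * 8-channel enable mask, the pin routing for this DIG, the short audio
 * descriptors (SADs) and the audio/video latency fields.
 */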
dce_v10_0_audio_write_speaker_allocation(encoder); 1899 1900 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, 1901 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); 1902 1903 dce_v10_0_afmt_audio_select_pin(encoder); 1904 dce_v10_0_audio_write_sad_regs(encoder); 1905 dce_v10_0_audio_write_latency_fields(encoder, mode); 1906 1907 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 1908 if (err < 0) { 1909 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1910 return; 1911 } 1912 1913 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); 1914 if (err < 0) { 1915 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); 1916 return; 1917 } 1918 1919 dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 1920 1921 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1922 /* enable AVI info frames */ 1923 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); 1924 /* required for avi info values to be updated */ 1925 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); 1926 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1927 1928 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1929 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); 1930 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1931 1932 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1933 /* send audio packets */ 1934 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); 1935 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1936 1937 WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); 1938 WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); 1939 WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); 1940 WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); 1941 1942 /* enable audio after setting up hw */ 1943 dce_v10_0_audio_enable(adev, dig->afmt->pin, true); 1944 } 1945 1946 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1947 { 1948 struct drm_device *dev = encoder->dev; 1949 struct amdgpu_device *adev = dev->dev_private; 1950 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1951 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1952 1953 if (!dig || !dig->afmt) 1954 return; 1955 1956 /* Silent, r600_hdmi_enable will raise WARN for us */ 1957 if (enable && dig->afmt->enabled) 1958 return; 1959 if (!enable && !dig->afmt->enabled) 1960 return; 1961 1962 if (!enable && dig->afmt->pin) { 1963 dce_v10_0_audio_enable(adev, dig->afmt->pin, false); 1964 dig->afmt->pin = NULL; 1965 } 1966 1967 dig->afmt->enabled = enable; 1968 1969 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1970 enable ?
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1971 } 1972 1973 static void dce_v10_0_afmt_init(struct amdgpu_device *adev) 1974 { 1975 int i; 1976 1977 for (i = 0; i < adev->mode_info.num_dig; i++) 1978 adev->mode_info.afmt[i] = NULL; 1979 1980 /* DCE10 has audio blocks tied to DIG encoders */ 1981 for (i = 0; i < adev->mode_info.num_dig; i++) { 1982 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1983 if (adev->mode_info.afmt[i]) { 1984 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1985 adev->mode_info.afmt[i]->id = i; 1986 } 1987 } 1988 } 1989 1990 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev) 1991 { 1992 int i; 1993 1994 for (i = 0; i < adev->mode_info.num_dig; i++) { 1995 kfree(adev->mode_info.afmt[i]); 1996 adev->mode_info.afmt[i] = NULL; 1997 } 1998 } 1999 2000 static const u32 vga_control_regs[6] = 2001 { 2002 mmD1VGA_CONTROL, 2003 mmD2VGA_CONTROL, 2004 mmD3VGA_CONTROL, 2005 mmD4VGA_CONTROL, 2006 mmD5VGA_CONTROL, 2007 mmD6VGA_CONTROL, 2008 }; 2009 2010 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable) 2011 { 2012 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2013 struct drm_device *dev = crtc->dev; 2014 struct amdgpu_device *adev = dev->dev_private; 2015 u32 vga_control; 2016 2017 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 2018 if (enable) 2019 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 2020 else 2021 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 2022 } 2023 2024 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) 2025 { 2026 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2027 struct drm_device *dev = crtc->dev; 2028 struct amdgpu_device *adev = dev->dev_private; 2029 2030 if (enable) 2031 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 2032 else 2033 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 2034 } 2035 2036 static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, 2037 struct drm_framebuffer *fb, 2038 int x, int y, int atomic) 2039 { 2040 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2041 struct drm_device *dev = crtc->dev; 2042 struct amdgpu_device *adev = dev->dev_private; 2043 struct amdgpu_framebuffer *amdgpu_fb; 2044 struct drm_framebuffer *target_fb; 2045 struct drm_gem_object *obj; 2046 struct amdgpu_bo *rbo; 2047 uint64_t fb_location, tiling_flags; 2048 uint32_t fb_format, fb_pitch_pixels; 2049 u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); 2050 u32 pipe_config; 2051 u32 tmp, viewport_w, viewport_h; 2052 int r; 2053 bool bypass_lut = false; 2054 2055 /* no fb bound */ 2056 if (!atomic && !crtc->primary->fb) { 2057 DRM_DEBUG_KMS("No FB bound\n"); 2058 return 0; 2059 } 2060 2061 if (atomic) { 2062 amdgpu_fb = to_amdgpu_framebuffer(fb); 2063 target_fb = fb; 2064 } 2065 else { 2066 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2067 target_fb = crtc->primary->fb; 2068 } 2069 2070 /* If atomic, assume fb object is pinned & idle & fenced and 2071 * just update base pointers 2072 */ 2073 obj = amdgpu_fb->obj; 2074 rbo = gem_to_amdgpu_bo(obj); 2075 r = amdgpu_bo_reserve(rbo, false); 2076 if (unlikely(r != 0)) 2077 return r; 2078 2079 if (atomic) 2080 fb_location = amdgpu_bo_gpu_offset(rbo); 2081 else { 2082 r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); 2083 if (unlikely(r != 0)) { 2084 amdgpu_bo_unreserve(rbo); 2085 return -EINVAL; 2086 } 2087 } 2088 2089 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); 2090 
amdgpu_bo_unreserve(rbo); 2091 2092 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 2093 2094 switch (target_fb->pixel_format) { 2095 case DRM_FORMAT_C8: 2096 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); 2097 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2098 break; 2099 case DRM_FORMAT_XRGB4444: 2100 case DRM_FORMAT_ARGB4444: 2101 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2102 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); 2103 #ifdef __BIG_ENDIAN 2104 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2105 ENDIAN_8IN16); 2106 #endif 2107 break; 2108 case DRM_FORMAT_XRGB1555: 2109 case DRM_FORMAT_ARGB1555: 2110 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2111 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2112 #ifdef __BIG_ENDIAN 2113 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2114 ENDIAN_8IN16); 2115 #endif 2116 break; 2117 case DRM_FORMAT_BGRX5551: 2118 case DRM_FORMAT_BGRA5551: 2119 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2120 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); 2121 #ifdef __BIG_ENDIAN 2122 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2123 ENDIAN_8IN16); 2124 #endif 2125 break; 2126 case DRM_FORMAT_RGB565: 2127 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 2128 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 2129 #ifdef __BIG_ENDIAN 2130 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2131 ENDIAN_8IN16); 2132 #endif 2133 break; 2134 case DRM_FORMAT_XRGB8888: 2135 case DRM_FORMAT_ARGB8888: 2136 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2137 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2138 #ifdef __BIG_ENDIAN 2139 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2140 ENDIAN_8IN32); 2141 #endif 2142 break; 2143 case DRM_FORMAT_XRGB2101010: 2144 case DRM_FORMAT_ARGB2101010: 2145 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2146 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 2147 #ifdef __BIG_ENDIAN 2148 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2149 ENDIAN_8IN32); 2150 #endif 2151 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 2152 bypass_lut = true; 2153 break; 2154 case DRM_FORMAT_BGRX1010102: 2155 case DRM_FORMAT_BGRA1010102: 2156 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2157 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); 2158 #ifdef __BIG_ENDIAN 2159 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2160 ENDIAN_8IN32); 2161 #endif 2162 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 2163 bypass_lut = true; 2164 break; 2165 default: 2166 DRM_ERROR("Unsupported screen format %s\n", 2167 drm_get_format_name(target_fb->pixel_format)); 2168 return -EINVAL; 2169 } 2170 2171 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 2172 unsigned bankw, bankh, mtaspect, tile_split, num_banks; 2173 2174 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 2175 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 2176 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 2177 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 2178 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 2179 2180 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, 
num_banks); 2181 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2182 ARRAY_2D_TILED_THIN1); 2183 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, 2184 tile_split); 2185 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); 2186 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); 2187 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, 2188 mtaspect); 2189 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, 2190 ADDR_SURF_MICRO_TILING_DISPLAY); 2191 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 2192 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2193 ARRAY_1D_TILED_THIN1); 2194 } 2195 2196 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, 2197 pipe_config); 2198 2199 dce_v10_0_vga_enable(crtc, false); 2200 2201 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2202 upper_32_bits(fb_location)); 2203 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2204 upper_32_bits(fb_location)); 2205 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2206 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 2207 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2208 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); 2209 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2210 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 2211 2212 /* 2213 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 2214 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2215 * retain the full precision throughout the pipeline. 
2216 */ 2217 tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); 2218 if (bypass_lut) 2219 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); 2220 else 2221 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); 2222 WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); 2223 2224 if (bypass_lut) 2225 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2226 2227 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2228 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2229 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2230 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2231 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2232 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2233 2234 fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); 2235 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2236 2237 dce_v10_0_grph_enable(crtc, true); 2238 2239 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2240 target_fb->height); 2241 2242 x &= ~3; 2243 y &= ~1; 2244 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2245 (x << 16) | y); 2246 viewport_w = crtc->mode.hdisplay; 2247 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2248 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2249 (viewport_w << 16) | viewport_h); 2250 2251 /* pageflip setup */ 2252 /* make sure flip is at vb rather than hb */ 2253 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 2254 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 2255 GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); 2256 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2257 2258 /* set pageflip to happen only at start of vblank interval (front porch) */ 2259 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); 2260 2261 if (!atomic && fb && fb != crtc->primary->fb) { 2262 amdgpu_fb = to_amdgpu_framebuffer(fb); 2263 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2264 r = amdgpu_bo_reserve(rbo, false); 2265 if (unlikely(r != 0)) 2266 return r; 2267 amdgpu_bo_unpin(rbo); 2268 amdgpu_bo_unreserve(rbo); 2269 } 2270 2271 /* Bytes per pixel may have changed */ 2272 dce_v10_0_bandwidth_update(adev); 2273 2274 return 0; 2275 } 2276 2277 static void dce_v10_0_set_interleave(struct drm_crtc *crtc, 2278 struct drm_display_mode *mode) 2279 { 2280 struct drm_device *dev = crtc->dev; 2281 struct amdgpu_device *adev = dev->dev_private; 2282 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2283 u32 tmp; 2284 2285 tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); 2286 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2287 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); 2288 else 2289 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); 2290 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); 2291 } 2292 2293 static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) 2294 { 2295 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2296 struct drm_device *dev = crtc->dev; 2297 struct amdgpu_device *adev = dev->dev_private; 2298 int i; 2299 u32 tmp; 2300 2301 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2302 2303 tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2304 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); 2305 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0); 2306 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2307 2308 tmp = RREG32(mmPRESCALE_GRPH_CONTROL + 
amdgpu_crtc->crtc_offset); 2309 tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); 2310 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2311 2312 tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset); 2313 tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1); 2314 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2315 2316 tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2317 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); 2318 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0); 2319 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2320 2321 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2322 2323 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2324 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2325 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2326 2327 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2328 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2329 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2330 2331 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2332 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2333 2334 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2335 for (i = 0; i < 256; i++) { 2336 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2337 (amdgpu_crtc->lut_r[i] << 20) | 2338 (amdgpu_crtc->lut_g[i] << 10) | 2339 (amdgpu_crtc->lut_b[i] << 0)); 2340 } 2341 2342 tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2343 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); 2344 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0); 2345 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); 2346 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2347 2348 tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); 2349 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); 2350 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0); 2351 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2352 2353 tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2354 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); 2355 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0); 2356 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2357 2358 tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2359 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); 2360 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0); 2361 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2362 2363 /* XXX match this to the depth of the crtc fmt block, move to modeset? 
*/ 2364 WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); 2365 /* XXX this only needs to be programmed once per crtc at startup, 2366 * not sure where the best place for it is 2367 */ 2368 tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); 2369 tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); 2370 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2371 } 2372 2373 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder) 2374 { 2375 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2376 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2377 2378 switch (amdgpu_encoder->encoder_id) { 2379 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2380 if (dig->linkb) 2381 return 1; 2382 else 2383 return 0; 2384 break; 2385 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2386 if (dig->linkb) 2387 return 3; 2388 else 2389 return 2; 2390 break; 2391 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2392 if (dig->linkb) 2393 return 5; 2394 else 2395 return 4; 2396 break; 2397 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2398 return 6; 2399 break; 2400 default: 2401 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2402 return 0; 2403 } 2404 } 2405 2406 /** 2407 * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc. 2408 * 2409 * @crtc: drm crtc 2410 * 2411 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2412 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2413 * monitors a dedicated PPLL must be used. If a particular board has 2414 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2415 * as there is no need to program the PLL itself. If we are not able to 2416 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2417 * avoid messing up an existing monitor. 
2418 * 2419 * Asic specific PLL information 2420 * 2421 * DCE 10.x 2422 * Tonga 2423 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2424 * CI 2425 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2426 * 2427 */ 2428 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) 2429 { 2430 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2431 struct drm_device *dev = crtc->dev; 2432 struct amdgpu_device *adev = dev->dev_private; 2433 u32 pll_in_use; 2434 int pll; 2435 2436 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2437 if (adev->clock.dp_extclk) 2438 /* skip PPLL programming if using ext clock */ 2439 return ATOM_PPLL_INVALID; 2440 else { 2441 /* use the same PPLL for all DP monitors */ 2442 pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2443 if (pll != ATOM_PPLL_INVALID) 2444 return pll; 2445 } 2446 } else { 2447 /* use the same PPLL for all monitors with the same clock */ 2448 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2449 if (pll != ATOM_PPLL_INVALID) 2450 return pll; 2451 } 2452 2453 /* DCE10 has PPLL0, PPLL1, and PPLL2 */ 2454 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2455 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2456 return ATOM_PPLL2; 2457 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2458 return ATOM_PPLL1; 2459 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2460 return ATOM_PPLL0; 2461 DRM_ERROR("unable to allocate a PPLL\n"); 2462 return ATOM_PPLL_INVALID; 2463 } 2464 2465 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2466 { 2467 struct amdgpu_device *adev = crtc->dev->dev_private; 2468 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2469 uint32_t cur_lock; 2470 2471 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2472 if (lock) 2473 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); 2474 else 2475 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); 2476 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2477 } 2478 2479 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) 2480 { 2481 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2482 struct amdgpu_device *adev = crtc->dev->dev_private; 2483 u32 tmp; 2484 2485 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2486 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); 2487 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2488 } 2489 2490 static void dce_v10_0_show_cursor(struct drm_crtc *crtc) 2491 { 2492 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2493 struct amdgpu_device *adev = crtc->dev->dev_private; 2494 u32 tmp; 2495 2496 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2497 upper_32_bits(amdgpu_crtc->cursor_addr)); 2498 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2499 lower_32_bits(amdgpu_crtc->cursor_addr)); 2500 2501 tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2502 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); 2503 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); 2504 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2505 } 2506 2507 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, 2508 int x, int y) 2509 { 2510 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2511 struct amdgpu_device *adev = crtc->dev->dev_private; 2512 int xorigin = 0, yorigin = 0; 2513 2514 /* avivo cursor are offset into the total surface */ 2515 x += crtc->x; 2516 y += crtc->y; 2517 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2518 
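/* negative coordinates are handled by clamping the position to the
 * top/left edge and offsetting the cursor hot spot instead, which lets
 * the cursor image move partially off screen.
 */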
2519 if (x < 0) { 2520 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2521 x = 0; 2522 } 2523 if (y < 0) { 2524 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2525 y = 0; 2526 } 2527 2528 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2529 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2530 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2531 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2532 2533 amdgpu_crtc->cursor_x = x; 2534 amdgpu_crtc->cursor_y = y; 2535 2536 return 0; 2537 } 2538 2539 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc, 2540 int x, int y) 2541 { 2542 int ret; 2543 2544 dce_v10_0_lock_cursor(crtc, true); 2545 ret = dce_v10_0_cursor_move_locked(crtc, x, y); 2546 dce_v10_0_lock_cursor(crtc, false); 2547 2548 return ret; 2549 } 2550 2551 static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, 2552 struct drm_file *file_priv, 2553 uint32_t handle, 2554 uint32_t width, 2555 uint32_t height, 2556 int32_t hot_x, 2557 int32_t hot_y) 2558 { 2559 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2560 struct drm_gem_object *obj; 2561 struct amdgpu_bo *aobj; 2562 int ret; 2563 2564 if (!handle) { 2565 /* turn off cursor */ 2566 dce_v10_0_hide_cursor(crtc); 2567 obj = NULL; 2568 goto unpin; 2569 } 2570 2571 if ((width > amdgpu_crtc->max_cursor_width) || 2572 (height > amdgpu_crtc->max_cursor_height)) { 2573 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2574 return -EINVAL; 2575 } 2576 2577 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); 2578 if (!obj) { 2579 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2580 return -ENOENT; 2581 } 2582 2583 aobj = gem_to_amdgpu_bo(obj); 2584 ret = amdgpu_bo_reserve(aobj, false); 2585 if (ret != 0) { 2586 drm_gem_object_unreference_unlocked(obj); 2587 return ret; 2588 } 2589 2590 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2591 amdgpu_bo_unreserve(aobj); 2592 if (ret) { 2593 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2594 drm_gem_object_unreference_unlocked(obj); 2595 return ret; 2596 } 2597 2598 amdgpu_crtc->cursor_width = width; 2599 amdgpu_crtc->cursor_height = height; 2600 2601 dce_v10_0_lock_cursor(crtc, true); 2602 2603 if (hot_x != amdgpu_crtc->cursor_hot_x || 2604 hot_y != amdgpu_crtc->cursor_hot_y) { 2605 int x, y; 2606 2607 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2608 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2609 2610 dce_v10_0_cursor_move_locked(crtc, x, y); 2611 2612 amdgpu_crtc->cursor_hot_x = hot_x; 2613 amdgpu_crtc->cursor_hot_y = hot_y; 2614 } 2615 2616 dce_v10_0_show_cursor(crtc); 2617 dce_v10_0_lock_cursor(crtc, false); 2618 2619 unpin: 2620 if (amdgpu_crtc->cursor_bo) { 2621 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2622 ret = amdgpu_bo_reserve(aobj, false); 2623 if (likely(ret == 0)) { 2624 amdgpu_bo_unpin(aobj); 2625 amdgpu_bo_unreserve(aobj); 2626 } 2627 drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); 2628 } 2629 2630 amdgpu_crtc->cursor_bo = obj; 2631 return 0; 2632 } 2633 2634 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2635 { 2636 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2637 2638 if (amdgpu_crtc->cursor_bo) { 2639 dce_v10_0_lock_cursor(crtc, true); 2640 2641 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2642 amdgpu_crtc->cursor_y); 2643 2644 dce_v10_0_show_cursor(crtc); 2645 
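/* release the update lock taken above so the restored cursor state takes effect */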
2646 dce_v10_0_lock_cursor(crtc, false); 2647 } 2648 } 2649 2650 static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2651 u16 *blue, uint32_t start, uint32_t size) 2652 { 2653 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2654 int end = (start + size > 256) ? 256 : start + size, i; 2655 2656 /* userspace palettes are always correct as is */ 2657 for (i = start; i < end; i++) { 2658 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2659 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2660 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2661 } 2662 dce_v10_0_crtc_load_lut(crtc); 2663 } 2664 2665 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) 2666 { 2667 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2668 2669 drm_crtc_cleanup(crtc); 2670 destroy_workqueue(amdgpu_crtc->pflip_queue); 2671 kfree(amdgpu_crtc); 2672 } 2673 2674 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { 2675 .cursor_set2 = dce_v10_0_crtc_cursor_set2, 2676 .cursor_move = dce_v10_0_crtc_cursor_move, 2677 .gamma_set = dce_v10_0_crtc_gamma_set, 2678 .set_config = amdgpu_crtc_set_config, 2679 .destroy = dce_v10_0_crtc_destroy, 2680 .page_flip = amdgpu_crtc_page_flip, 2681 }; 2682 2683 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2684 { 2685 struct drm_device *dev = crtc->dev; 2686 struct amdgpu_device *adev = dev->dev_private; 2687 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2688 unsigned type; 2689 2690 switch (mode) { 2691 case DRM_MODE_DPMS_ON: 2692 amdgpu_crtc->enabled = true; 2693 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2694 dce_v10_0_vga_enable(crtc, true); 2695 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2696 dce_v10_0_vga_enable(crtc, false); 2697 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2698 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2699 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2700 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2701 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2702 dce_v10_0_crtc_load_lut(crtc); 2703 break; 2704 case DRM_MODE_DPMS_STANDBY: 2705 case DRM_MODE_DPMS_SUSPEND: 2706 case DRM_MODE_DPMS_OFF: 2707 drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); 2708 if (amdgpu_crtc->enabled) { 2709 dce_v10_0_vga_enable(crtc, true); 2710 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2711 dce_v10_0_vga_enable(crtc, false); 2712 } 2713 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2714 amdgpu_crtc->enabled = false; 2715 break; 2716 } 2717 /* adjust pm to dpms */ 2718 amdgpu_pm_compute_clocks(adev); 2719 } 2720 2721 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc) 2722 { 2723 /* disable crtc pair power gating before programming */ 2724 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2725 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2726 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2727 } 2728 2729 static void dce_v10_0_crtc_commit(struct drm_crtc *crtc) 2730 { 2731 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2732 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2733 } 2734 2735 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) 2736 { 2737 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2738 struct drm_device *dev = crtc->dev; 2739 struct amdgpu_device *adev = dev->dev_private; 2740 struct amdgpu_atom_ss ss; 2741 int i; 2742 2743 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2744 if (crtc->primary->fb) { 2745 int r; 2746 struct amdgpu_framebuffer *amdgpu_fb; 2747 struct amdgpu_bo *rbo; 2748 2749 amdgpu_fb = 
to_amdgpu_framebuffer(crtc->primary->fb); 2750 rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2751 r = amdgpu_bo_reserve(rbo, false); 2752 if (unlikely(r)) 2753 DRM_ERROR("failed to reserve rbo before unpin\n"); 2754 else { 2755 amdgpu_bo_unpin(rbo); 2756 amdgpu_bo_unreserve(rbo); 2757 } 2758 } 2759 /* disable the GRPH */ 2760 dce_v10_0_grph_enable(crtc, false); 2761 2762 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2763 2764 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2765 if (adev->mode_info.crtcs[i] && 2766 adev->mode_info.crtcs[i]->enabled && 2767 i != amdgpu_crtc->crtc_id && 2768 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2769 /* another crtc is using this pll, don't turn 2770 * off the pll 2771 */ 2772 goto done; 2773 } 2774 } 2775 2776 switch (amdgpu_crtc->pll_id) { 2777 case ATOM_PPLL0: 2778 case ATOM_PPLL1: 2779 case ATOM_PPLL2: 2780 /* disable the ppll */ 2781 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2782 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2783 break; 2784 default: 2785 break; 2786 } 2787 done: 2788 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2789 amdgpu_crtc->adjusted_clock = 0; 2790 amdgpu_crtc->encoder = NULL; 2791 amdgpu_crtc->connector = NULL; 2792 } 2793 2794 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc, 2795 struct drm_display_mode *mode, 2796 struct drm_display_mode *adjusted_mode, 2797 int x, int y, struct drm_framebuffer *old_fb) 2798 { 2799 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2800 2801 if (!amdgpu_crtc->adjusted_clock) 2802 return -EINVAL; 2803 2804 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2805 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2806 dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2807 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2808 amdgpu_atombios_crtc_scaler_setup(crtc); 2809 dce_v10_0_cursor_reset(crtc); 2810 /* update the hw version for dpm */ 2811 amdgpu_crtc->hw_mode = *adjusted_mode; 2812 2813 return 0; 2814 } 2815 2816 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc, 2817 const struct drm_display_mode *mode, 2818 struct drm_display_mode *adjusted_mode) 2819 { 2820 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2821 struct drm_device *dev = crtc->dev; 2822 struct drm_encoder *encoder; 2823 2824 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2825 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2826 if (encoder->crtc == crtc) { 2827 amdgpu_crtc->encoder = encoder; 2828 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2829 break; 2830 } 2831 } 2832 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2833 amdgpu_crtc->encoder = NULL; 2834 amdgpu_crtc->connector = NULL; 2835 return false; 2836 } 2837 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2838 return false; 2839 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2840 return false; 2841 /* pick pll */ 2842 amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc); 2843 /* if we can't get a PPLL for a non-DP encoder, fail */ 2844 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2845 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2846 return false; 2847 2848 return true; 2849 } 2850 2851 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2852 struct drm_framebuffer *old_fb) 2853 { 2854 return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2855 } 2856 2857 static int
dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2858 struct drm_framebuffer *fb, 2859 int x, int y, enum mode_set_atomic state) 2860 { 2861 return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1); 2862 } 2863 2864 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { 2865 .dpms = dce_v10_0_crtc_dpms, 2866 .mode_fixup = dce_v10_0_crtc_mode_fixup, 2867 .mode_set = dce_v10_0_crtc_mode_set, 2868 .mode_set_base = dce_v10_0_crtc_set_base, 2869 .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic, 2870 .prepare = dce_v10_0_crtc_prepare, 2871 .commit = dce_v10_0_crtc_commit, 2872 .load_lut = dce_v10_0_crtc_load_lut, 2873 .disable = dce_v10_0_crtc_disable, 2874 }; 2875 2876 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) 2877 { 2878 struct amdgpu_crtc *amdgpu_crtc; 2879 int i; 2880 2881 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2882 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2883 if (amdgpu_crtc == NULL) 2884 return -ENOMEM; 2885 2886 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); 2887 2888 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2889 amdgpu_crtc->crtc_id = index; 2890 amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); 2891 adev->mode_info.crtcs[index] = amdgpu_crtc; 2892 2893 amdgpu_crtc->max_cursor_width = 128; 2894 amdgpu_crtc->max_cursor_height = 128; 2895 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2896 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2897 2898 for (i = 0; i < 256; i++) { 2899 amdgpu_crtc->lut_r[i] = i << 2; 2900 amdgpu_crtc->lut_g[i] = i << 2; 2901 amdgpu_crtc->lut_b[i] = i << 2; 2902 } 2903 2904 switch (amdgpu_crtc->crtc_id) { 2905 case 0: 2906 default: 2907 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; 2908 break; 2909 case 1: 2910 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; 2911 break; 2912 case 2: 2913 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; 2914 break; 2915 case 3: 2916 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; 2917 break; 2918 case 4: 2919 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; 2920 break; 2921 case 5: 2922 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; 2923 break; 2924 } 2925 2926 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2927 amdgpu_crtc->adjusted_clock = 0; 2928 amdgpu_crtc->encoder = NULL; 2929 amdgpu_crtc->connector = NULL; 2930 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs); 2931 2932 return 0; 2933 } 2934 2935 static int dce_v10_0_early_init(void *handle) 2936 { 2937 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2938 2939 adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; 2940 adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; 2941 2942 dce_v10_0_set_display_funcs(adev); 2943 dce_v10_0_set_irq_funcs(adev); 2944 2945 switch (adev->asic_type) { 2946 case CHIP_FIJI: 2947 case CHIP_TONGA: 2948 adev->mode_info.num_crtc = 6; /* XXX 7??? 
*/ 2949 adev->mode_info.num_hpd = 6; 2950 adev->mode_info.num_dig = 7; 2951 break; 2952 default: 2953 /* FIXME: not supported yet */ 2954 return -EINVAL; 2955 } 2956 2957 return 0; 2958 } 2959 2960 static int dce_v10_0_sw_init(void *handle) 2961 { 2962 int r, i; 2963 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2964 2965 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2966 r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); 2967 if (r) 2968 return r; 2969 } 2970 2971 for (i = 8; i < 20; i += 2) { 2972 r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); 2973 if (r) 2974 return r; 2975 } 2976 2977 /* HPD hotplug */ 2978 r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); 2979 if (r) 2980 return r; 2981 2982 adev->mode_info.mode_config_initialized = true; 2983 2984 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; 2985 2986 adev->ddev->mode_config.max_width = 16384; 2987 adev->ddev->mode_config.max_height = 16384; 2988 2989 adev->ddev->mode_config.preferred_depth = 24; 2990 adev->ddev->mode_config.prefer_shadow = 1; 2991 2992 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2993 2994 r = amdgpu_modeset_create_props(adev); 2995 if (r) 2996 return r; 2997 2998 adev->ddev->mode_config.max_width = 16384; 2999 adev->ddev->mode_config.max_height = 16384; 3000 3001 /* allocate crtcs */ 3002 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3003 r = dce_v10_0_crtc_init(adev, i); 3004 if (r) 3005 return r; 3006 } 3007 3008 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 3009 amdgpu_print_display_setup(adev->ddev); 3010 else 3011 return -EINVAL; 3012 3013 /* setup afmt */ 3014 dce_v10_0_afmt_init(adev); 3015 3016 r = dce_v10_0_audio_init(adev); 3017 if (r) 3018 return r; 3019 3020 drm_kms_helper_poll_init(adev->ddev); 3021 3022 return r; 3023 } 3024 3025 static int dce_v10_0_sw_fini(void *handle) 3026 { 3027 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3028 3029 kfree(adev->mode_info.bios_hardcoded_edid); 3030 3031 drm_kms_helper_poll_fini(adev->ddev); 3032 3033 dce_v10_0_audio_fini(adev); 3034 3035 dce_v10_0_afmt_fini(adev); 3036 3037 drm_mode_config_cleanup(adev->ddev); 3038 adev->mode_info.mode_config_initialized = false; 3039 3040 return 0; 3041 } 3042 3043 static int dce_v10_0_hw_init(void *handle) 3044 { 3045 int i; 3046 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3047 3048 dce_v10_0_init_golden_registers(adev); 3049 3050 /* init dig PHYs, disp eng pll */ 3051 amdgpu_atombios_encoder_init_dig(adev); 3052 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 3053 3054 /* initialize hpd */ 3055 dce_v10_0_hpd_init(adev); 3056 3057 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 3058 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3059 } 3060 3061 dce_v10_0_pageflip_interrupt_init(adev); 3062 3063 return 0; 3064 } 3065 3066 static int dce_v10_0_hw_fini(void *handle) 3067 { 3068 int i; 3069 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3070 3071 dce_v10_0_hpd_fini(adev); 3072 3073 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 3074 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3075 } 3076 3077 dce_v10_0_pageflip_interrupt_fini(adev); 3078 3079 return 0; 3080 } 3081 3082 static int dce_v10_0_suspend(void *handle) 3083 { 3084 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3085 3086 amdgpu_atombios_scratch_regs_save(adev); 3087 3088 return dce_v10_0_hw_fini(handle); 3089 } 3090 3091 static int dce_v10_0_resume(void *handle) 3092 { 
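/* the mirror image of dce_v10_0_suspend(): bring the hardware back up
 * first, then restore the ATOM BIOS scratch registers that were saved
 * on suspend.
 */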
3093 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3094 int ret; 3095 3096 ret = dce_v10_0_hw_init(handle); 3097 3098 amdgpu_atombios_scratch_regs_restore(adev); 3099 3100 /* turn on the BL */ 3101 if (adev->mode_info.bl_encoder) { 3102 u8 bl_level = amdgpu_display_backlight_get_level(adev, 3103 adev->mode_info.bl_encoder); 3104 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 3105 bl_level); 3106 } 3107 3108 return ret; 3109 } 3110 3111 static bool dce_v10_0_is_idle(void *handle) 3112 { 3113 return true; 3114 } 3115 3116 static int dce_v10_0_wait_for_idle(void *handle) 3117 { 3118 return 0; 3119 } 3120 3121 static void dce_v10_0_print_status(void *handle) 3122 { 3123 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3124 3125 dev_info(adev->dev, "DCE 10.x registers\n"); 3126 /* XXX todo */ 3127 } 3128 3129 static int dce_v10_0_soft_reset(void *handle) 3130 { 3131 u32 srbm_soft_reset = 0, tmp; 3132 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3133 3134 if (dce_v10_0_is_display_hung(adev)) 3135 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 3136 3137 if (srbm_soft_reset) { 3138 dce_v10_0_print_status((void *)adev); 3139 3140 tmp = RREG32(mmSRBM_SOFT_RESET); 3141 tmp |= srbm_soft_reset; 3142 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 3143 WREG32(mmSRBM_SOFT_RESET, tmp); 3144 tmp = RREG32(mmSRBM_SOFT_RESET); 3145 3146 udelay(50); 3147 3148 tmp &= ~srbm_soft_reset; 3149 WREG32(mmSRBM_SOFT_RESET, tmp); 3150 tmp = RREG32(mmSRBM_SOFT_RESET); 3151 3152 /* Wait a little for things to settle down */ 3153 udelay(50); 3154 dce_v10_0_print_status((void *)adev); 3155 } 3156 return 0; 3157 } 3158 3159 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 3160 int crtc, 3161 enum amdgpu_interrupt_state state) 3162 { 3163 u32 lb_interrupt_mask; 3164 3165 if (crtc >= adev->mode_info.num_crtc) { 3166 DRM_DEBUG("invalid crtc %d\n", crtc); 3167 return; 3168 } 3169 3170 switch (state) { 3171 case AMDGPU_IRQ_STATE_DISABLE: 3172 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3173 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3174 VBLANK_INTERRUPT_MASK, 0); 3175 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3176 break; 3177 case AMDGPU_IRQ_STATE_ENABLE: 3178 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3179 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3180 VBLANK_INTERRUPT_MASK, 1); 3181 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3182 break; 3183 default: 3184 break; 3185 } 3186 } 3187 3188 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 3189 int crtc, 3190 enum amdgpu_interrupt_state state) 3191 { 3192 u32 lb_interrupt_mask; 3193 3194 if (crtc >= adev->mode_info.num_crtc) { 3195 DRM_DEBUG("invalid crtc %d\n", crtc); 3196 return; 3197 } 3198 3199 switch (state) { 3200 case AMDGPU_IRQ_STATE_DISABLE: 3201 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3202 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3203 VLINE_INTERRUPT_MASK, 0); 3204 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3205 break; 3206 case AMDGPU_IRQ_STATE_ENABLE: 3207 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3208 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3209 VLINE_INTERRUPT_MASK, 1); 3210 WREG32(mmLB_INTERRUPT_MASK + 
crtc_offsets[crtc], lb_interrupt_mask); 3211 break; 3212 default: 3213 break; 3214 } 3215 } 3216 3217 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev, 3218 struct amdgpu_irq_src *source, 3219 unsigned hpd, 3220 enum amdgpu_interrupt_state state) 3221 { 3222 u32 tmp; 3223 3224 if (hpd >= adev->mode_info.num_hpd) { 3225 DRM_DEBUG("invalid hpd %d\n", hpd); 3226 return 0; 3227 } 3228 3229 switch (state) { 3230 case AMDGPU_IRQ_STATE_DISABLE: 3231 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3232 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); 3233 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3234 break; 3235 case AMDGPU_IRQ_STATE_ENABLE: 3236 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3237 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); 3238 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3239 break; 3240 default: 3241 break; 3242 } 3243 3244 return 0; 3245 } 3246 3247 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev, 3248 struct amdgpu_irq_src *source, 3249 unsigned type, 3250 enum amdgpu_interrupt_state state) 3251 { 3252 switch (type) { 3253 case AMDGPU_CRTC_IRQ_VBLANK1: 3254 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3255 break; 3256 case AMDGPU_CRTC_IRQ_VBLANK2: 3257 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3258 break; 3259 case AMDGPU_CRTC_IRQ_VBLANK3: 3260 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3261 break; 3262 case AMDGPU_CRTC_IRQ_VBLANK4: 3263 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3264 break; 3265 case AMDGPU_CRTC_IRQ_VBLANK5: 3266 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3267 break; 3268 case AMDGPU_CRTC_IRQ_VBLANK6: 3269 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3270 break; 3271 case AMDGPU_CRTC_IRQ_VLINE1: 3272 dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state); 3273 break; 3274 case AMDGPU_CRTC_IRQ_VLINE2: 3275 dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state); 3276 break; 3277 case AMDGPU_CRTC_IRQ_VLINE3: 3278 dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state); 3279 break; 3280 case AMDGPU_CRTC_IRQ_VLINE4: 3281 dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state); 3282 break; 3283 case AMDGPU_CRTC_IRQ_VLINE5: 3284 dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state); 3285 break; 3286 case AMDGPU_CRTC_IRQ_VLINE6: 3287 dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state); 3288 break; 3289 default: 3290 break; 3291 } 3292 return 0; 3293 } 3294 3295 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev, 3296 struct amdgpu_irq_src *src, 3297 unsigned type, 3298 enum amdgpu_interrupt_state state) 3299 { 3300 u32 reg; 3301 3302 if (type >= adev->mode_info.num_crtc) { 3303 DRM_ERROR("invalid pageflip crtc %d\n", type); 3304 return -EINVAL; 3305 } 3306 3307 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3308 if (state == AMDGPU_IRQ_STATE_DISABLE) 3309 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3310 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3311 else 3312 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3313 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3314 3315 return 0; 3316 } 3317 3318 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, 3319 struct amdgpu_irq_src *source, 3320 struct amdgpu_iv_entry *entry) 3321 { 3322 unsigned long flags; 3323 unsigned crtc_id; 3324 struct amdgpu_crtc *amdgpu_crtc; 3325 struct amdgpu_flip_work *works; 3326 3327 crtc_id =
(entry->src_id - 8) >> 1; 3328 3329 /* bounds-check crtc_id before it is used to index the crtc array */ 3330 if (crtc_id >= adev->mode_info.num_crtc) { 3331 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3332 return -EINVAL; 3333 } 3334 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3335 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3336 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3337 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3338 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3339 3340 /* the IRQ can fire before the crtc has been fully initialized */ 3341 if (amdgpu_crtc == NULL) 3342 return 0; 3343 3344 spin_lock_irqsave(&adev->ddev->event_lock, flags); 3345 works = amdgpu_crtc->pflip_works; 3346 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { 3347 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3348 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3349 amdgpu_crtc->pflip_status, 3350 AMDGPU_FLIP_SUBMITTED); 3351 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3352 return 0; 3353 } 3354 3355 /* page flip completed. clean up */ 3356 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3357 amdgpu_crtc->pflip_works = NULL; 3358 3359 /* wake up userspace */ 3360 if (works->event) 3361 drm_send_vblank_event(adev->ddev, crtc_id, works->event); 3362 3363 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3364 3365 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3366 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3367 3368 return 0; 3369 } 3370 3371 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, 3372 int hpd) 3373 { 3374 u32 tmp; 3375 3376 if (hpd >= adev->mode_info.num_hpd) { 3377 DRM_DEBUG("invalid hpd %d\n", hpd); 3378 return; 3379 } 3380 3381 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3382 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); 3383 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3384 } 3385 3386 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev, 3387 int crtc) 3388 { 3389 u32 tmp; 3390 3391 if (crtc >= adev->mode_info.num_crtc) { 3392 DRM_DEBUG("invalid crtc %d\n", crtc); 3393 return; 3394 } 3395 3396 tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); 3397 tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); 3398 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); 3399 } 3400 3401 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev, 3402 int crtc) 3403 { 3404 u32 tmp; 3405 3406 if (crtc >= adev->mode_info.num_crtc) { 3407 DRM_DEBUG("invalid crtc %d\n", crtc); 3408 return; 3409 } 3410 3411 tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); 3412 tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); 3413 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); 3414 } 3415 3416 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, 3417 struct amdgpu_irq_src *source, 3418 struct amdgpu_iv_entry *entry) 3419 { 3420 unsigned crtc = entry->src_id - 1; 3421 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3422 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 3423 3424 switch (entry->src_data) { 3425 case 0: /* vblank */ 3426 if (disp_int & interrupt_status_offsets[crtc].vblank) 3427 dce_v10_0_crtc_vblank_int_ack(adev, crtc); 3428 else 3429 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3430 3431 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3432 drm_handle_vblank(adev->ddev, crtc); 3433 } 3434 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3435 3436 break; 3437 case 1: /* vline */ 3438 if (disp_int & interrupt_status_offsets[crtc].vline) 3439 dce_v10_0_crtc_vline_int_ack(adev, crtc);
static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
			      struct amdgpu_irq_src *source,
			      struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int;
	unsigned irq_type;

	/* crtc IV src_ids are 1-based; validate before indexing
	 * interrupt_status_offsets[] */
	if (crtc >= adev->mode_info.num_crtc) {
		DRM_DEBUG("invalid crtc %d\n", crtc);
		return 0;
	}

	disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			dce_v10_0_crtc_vline_int_ack(adev, crtc);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
		break;
	default:
		DRM_DEBUG("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}

static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask;
	unsigned hpd;

	if (entry->src_data >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		return 0;
	}

	hpd = entry->src_data;
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		dce_v10_0_hpd_int_ack(adev, hpd);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

/* no clock/power gating support in this display block yet */
static int dce_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs dce_v10_0_ip_funcs = {
	.early_init = dce_v10_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v10_0_sw_init,
	.sw_fini = dce_v10_0_sw_fini,
	.hw_init = dce_v10_0_hw_init,
	.hw_fini = dce_v10_0_hw_fini,
	.suspend = dce_v10_0_suspend,
	.resume = dce_v10_0_resume,
	.is_idle = dce_v10_0_is_idle,
	.wait_for_idle = dce_v10_0_wait_for_idle,
	.soft_reset = dce_v10_0_soft_reset,
	.print_status = dce_v10_0_print_status,
	.set_clockgating_state = dce_v10_0_set_clockgating_state,
	.set_powergating_state = dce_v10_0_set_powergating_state,
};
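/*
 * Hookup sketch (assumption; the actual table lives in the SoC file,
 * e.g. vi.c, not here): dce_v10_0_ip_funcs is consumed through the IP
 * block list, roughly as
 *
 *	{
 *		.type = AMD_IP_BLOCK_TYPE_DCE,
 *		.major = 10,
 *		.minor = 0,
 *		.rev = 0,
 *		.funcs = &dce_v10_0_ip_funcs,
 *	},
 */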
static void
dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v10_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v10_0_afmt_enable(encoder, true);
		dce_v10_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
								    ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v10_0_program_fmt(encoder);
}

static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v10_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
{
}

static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
{
}

static void
dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
		       struct drm_display_mode *mode,
		       struct drm_display_mode *adjusted_mode)
{
}

static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
{
}

static void
dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}

static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
	.dpms = dce_v10_0_ext_dpms,
	.mode_fixup = dce_v10_0_ext_mode_fixup,
	.prepare = dce_v10_0_ext_prepare,
	.mode_set = dce_v10_0_ext_mode_set,
	.commit = dce_v10_0_ext_commit,
	.disable = dce_v10_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v10_0_encoder_prepare,
	.mode_set = dce_v10_0_encoder_mode_set,
	.commit = dce_v10_0_encoder_commit,
	.disable = dce_v10_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v10_0_encoder_prepare,
	.mode_set = dce_v10_0_encoder_mode_set,
	.commit = dce_v10_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};
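/*
 * Call-order sketch (simplified; driven by the DRM CRTC helpers, not by
 * this file): during a full modeset the helper vtables above are
 * exercised roughly as
 *
 *	->prepare(encoder)	dpms off, pick DIG, lock scratch regs
 *	(crtc mode programming)
 *	->mode_set(encoder, mode, adjusted_mode)
 *	->commit(encoder)	dpms on, unlock scratch regs
 */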
static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
	.destroy = dce_v10_0_encoder_destroy,
};
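/*
 * dce_v10_0_encoder_add() below is reached through the .add_encoder
 * display hook while the ATOM BIOS object tables are parsed; a direct
 * call would look like this (illustrative; the arguments come from the
 * BIOS tables):
 *
 *	adev->mode_info.funcs->add_encoder(adev, encoder_enum,
 *					   supported_device, caps);
 */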
static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
				  uint32_t encoder_enum,
				  uint32_t supported_device,
				  u16 caps)
{
	struct drm_device *dev = adev->ddev;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC);
		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC);
		else
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
	.set_vga_render_state = &dce_v10_0_set_vga_render_state,
	.bandwidth_update = &dce_v10_0_bandwidth_update,
	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
	.vblank_wait = &dce_v10_0_vblank_wait,
	.is_display_hung = &dce_v10_0_is_display_hung,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v10_0_hpd_sense,
	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
	.page_flip = &dce_v10_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v10_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
	.stop_mc_access = &dce_v10_0_stop_mc_access,
	.resume_mc_access = &dce_v10_0_resume_mc_access,
};

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dce_v10_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
	.set = dce_v10_0_set_crtc_irq_state,
	.process = dce_v10_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
	.set = dce_v10_0_set_pageflip_irq_state,
	.process = dce_v10_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
	.set = dce_v10_0_set_hpd_irq_state,
	.process = dce_v10_0_hpd_irq,
};

static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
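/*
 * Registration sketch (assumption; the matching calls live in
 * dce_v10_0_sw_init(), earlier in this file): an amdgpu_irq_src only
 * fires once its IV source ids are registered with the interrupt
 * handler, e.g. for the crtc sources whose src_ids are 1..num_crtc:
 *
 *	for (i = 0; i < adev->mode_info.num_crtc; i++)
 *		amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
 */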