/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
        SI_CRTC0_REGISTER_OFFSET,
        SI_CRTC1_REGISTER_OFFSET,
        SI_CRTC2_REGISTER_OFFSET,
        SI_CRTC3_REGISTER_OFFSET,
        SI_CRTC4_REGISTER_OFFSET,
        SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
        mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
        mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
        mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
        mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
        mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
        mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
        SI_CRTC0_REGISTER_OFFSET,
        SI_CRTC1_REGISTER_OFFSET,
        SI_CRTC2_REGISTER_OFFSET,
        SI_CRTC3_REGISTER_OFFSET,
        SI_CRTC4_REGISTER_OFFSET,
        SI_CRTC5_REGISTER_OFFSET,
        (0x13830 - 0x7030) >> 2,
};

static const struct {
        uint32_t reg;
        uint32_t vblank;
        uint32_t vline;
        uint32_t hpd;
} interrupt_status_offsets[6] = { {
        .reg = mmDISP_INTERRUPT_STATUS,
        .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
                                     u32 block_offset, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
        r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

        return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
                                      u32 block_offset, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
               reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
        WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
        spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
{
        if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
                return true;
        else
                return false;
}

static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
{
        u32 pos1, pos2;

        pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
        pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

        if (pos1 != pos2)
                return true;
        else
                return false;
}

/**
 * dce_v6_0_vblank_wait - vblank wait asic callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (evergreen+).
 */
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
        unsigned i = 100;

        if (crtc >= adev->mode_info.num_crtc)
                return;

        if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
                return;

        /* depending on when we hit vblank, we may be close to active; if so,
         * wait for another frame.
         */
        while (dce_v6_0_is_in_vblank(adev, crtc)) {
                if (i++ == 100) {
                        i = 0;
                        if (!dce_v6_0_is_counter_moving(adev, crtc))
                                break;
                }
        }

        while (!dce_v6_0_is_in_vblank(adev, crtc)) {
                if (i++ == 100) {
                        i = 0;
                        if (!dce_v6_0_is_counter_moving(adev, crtc))
                                break;
                }
        }
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
        if (crtc >= adev->mode_info.num_crtc)
                return 0;
        else
                return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
        unsigned i;

        /* Enable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
        unsigned i;

        /* Disable pflip interrupts */
        for (i = 0; i < adev->mode_info.num_crtc; i++)
                amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * Programs the new scanout address; the double buffered update then
 * takes effect at the next vertical retrace, or at the next horizontal
 * retrace if an async flip was requested.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
                               int crtc_id, u64 crtc_base, bool async)
{
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];

        /* flip at hsync for async, default is vsync */
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
               GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
        /* update the scanout addresses */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(crtc_base));
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               (u32)crtc_base);

        /* post the write */
        RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                        u32 *vbl, u32 *position)
{
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;

        *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
        *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

        return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
                               enum amdgpu_hpd_id hpd)
{
        bool connected = false;

        if (hpd >= adev->mode_info.num_hpd)
                return connected;

        if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
                connected = true;

        return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
                                      enum amdgpu_hpd_id hpd)
{
        u32 tmp;
        bool connected = dce_v6_0_hpd_sense(adev, hpd);

        if (hpd >= adev->mode_info.num_hpd)
                return;

        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
        if (connected)
                tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
        else
                tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        u32 tmp;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
                        /* don't try to enable hpd on eDP or LVDS to avoid
                         * breaking the aux dp channel on imacs; this helps
                         * (but does not completely fix)
                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
                         * and also avoids interrupt storms during dpms.
                         */
                        tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                        tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
                        WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
                        continue;
                }

                dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        u32 tmp;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                /* write back the masked value; writing 0 here would throw
                 * away the read-modify done just above.
                 */
                tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
                WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
        return mmDC_GPIO_HPD_A;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
                                          bool render)
{
        if (!render)
                WREG32(mmVGA_RENDER_CONTROL,
                       RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
                return 6;
        case CHIP_OLAND:
                return 2;
        default:
                return 0;
        }
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
        /* Disable VGA render and enabled crtcs, if the asic has a DCE engine */
        if (amdgpu_atombios_has_dce_engine_info(adev)) {
                u32 tmp;
                int crtc_enabled, i;

                dce_v6_0_set_vga_render_state(adev, false);

                /* disable crtcs */
                for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
                        crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
                                CRTC_CONTROL__CRTC_MASTER_EN_MASK;
                        if (crtc_enabled) {
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                                tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
                                WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                }
        }
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        int bpc = 0;
        u32 tmp = 0;
        enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                bpc = amdgpu_connector_get_monitor_bpc(connector);
                dither = amdgpu_connector->dither;
        }

        /* LVDS FMT is set up by atom */
        if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
                return;

        if (bpc == 0)
                return;

        switch (bpc) {
        case 6:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
                else
                        tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
                break;
        case 8:
                if (dither == AMDGPU_FMT_DITHER_ENABLE)
                        /* XXX sort out optimal dither settings */
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
                else
                        tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
                                FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
                break;
        case 10:
        default:
                /* not needed */
                break;
        }

        WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
        u32 tmp = RREG32(mmMC_SHARED_CHMAP);

        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
        case 0:
        default:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        case 3:
                return 8;
        case 4:
                return 3;
        case 5:
                return 6;
        case 6:
                return 10;
        case 7:
                return 12;
        case 8:
                return 16;
        }
}

struct dce6_wm_params {
        u32 dram_channels; /* number of dram channels */
        u32 yclk; /* bandwidth per dram data pin in kHz */
        u32 sclk; /* engine clock in kHz */
        u32 disp_clk; /* display clock in kHz */
        u32 src_width; /* viewport width */
        u32 active_time; /* active display time in ns */
        u32 blank_time; /* blank time in ns */
        bool interlaced; /* mode is interlaced */
        fixed20_12 vsc; /* vertical scale ratio */
        u32 num_heads; /* number of active crtcs */
        u32 bytes_per_pixel; /* bytes per pixel display + overlay */
        u32 lb_size; /* line buffer allocated to pipe */
        u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate raw DRAM Bandwidth */
        fixed20_12 dram_efficiency; /* 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        dram_efficiency.full = dfixed_const(7);
        dram_efficiency.full = dfixed_div(dram_efficiency, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

        return dfixed_trunc(bandwidth);
}
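
/* Worked example with illustrative numbers (not from any particular board):
 * for wm->yclk = 1000000 kHz and 2 DRAM channels, the math above yields
 * (1000000 / 1000) * (2 * 4) * 0.7 = 5600 MBytes/s of raw DRAM bandwidth.
 */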
/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
        /* Calculate DRAM Bandwidth and the part allocated to display. */
        fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
        fixed20_12 yclk, dram_channels, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        yclk.full = dfixed_const(wm->yclk);
        yclk.full = dfixed_div(yclk, a);
        dram_channels.full = dfixed_const(wm->dram_channels * 4);
        a.full = dfixed_const(10);
        disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
        disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
        bandwidth.full = dfixed_mul(dram_channels, yclk);
        bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the display Data return Bandwidth */
        fixed20_12 return_efficiency; /* 0.8 */
        fixed20_12 sclk, bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        sclk.full = dfixed_const(wm->sclk);
        sclk.full = dfixed_div(sclk, a);
        a.full = dfixed_const(10);
        return_efficiency.full = dfixed_const(8);
        return_efficiency.full = dfixed_div(return_efficiency, a);
        a.full = dfixed_const(32);
        bandwidth.full = dfixed_mul(a, sclk);
        bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the DMIF Request Bandwidth */
        fixed20_12 disp_clk_request_efficiency; /* 0.8 */
        fixed20_12 disp_clk, bandwidth;
        fixed20_12 a, b;

        a.full = dfixed_const(1000);
        disp_clk.full = dfixed_const(wm->disp_clk);
        disp_clk.full = dfixed_div(disp_clk, a);
        a.full = dfixed_const(32);
        b.full = dfixed_mul(a, disp_clk);

        a.full = dfixed_const(10);
        disp_clk_request_efficiency.full = dfixed_const(8);
        disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

        bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

        return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the Available bandwidth. Display can use this
         * temporarily but not in average.
         */
        u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
        u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
        u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

        return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
        /* Calculate the display mode Average Bandwidth
         * DisplayMode should contain the source and destination dimensions,
         * timing, etc.
         */
        fixed20_12 bpp;
        fixed20_12 line_time;
        fixed20_12 src_width;
        fixed20_12 bandwidth;
        fixed20_12 a;

        a.full = dfixed_const(1000);
        line_time.full = dfixed_const(wm->active_time + wm->blank_time);
        line_time.full = dfixed_div(line_time, a);
        bpp.full = dfixed_const(wm->bytes_per_pixel);
        src_width.full = dfixed_const(wm->src_width);
        bandwidth.full = dfixed_mul(src_width, bpp);
        bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
        bandwidth.full = dfixed_div(bandwidth, line_time);

        return dfixed_trunc(bandwidth);
}
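
/* Rough sanity check with illustrative numbers: a 1920 pixel wide source at
 * 4 bytes/pixel with vsc = 1 on a mode whose total line time is ~14800 ns
 * averages 1920 * 4 / 14.8 ≈ 519 MBytes/s.
 */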
/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
        /* First calculate the latency in ns */
        u32 mc_latency = 2000; /* 2000 ns */
        u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
        u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
        u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
        u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
        u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
                (wm->num_heads * cursor_line_pair_return_time);
        u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
        u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
        u32 tmp, dmif_size = 12288;
        fixed20_12 a, b, c;

        if (wm->num_heads == 0)
                return 0;

        a.full = dfixed_const(2);
        b.full = dfixed_const(1);
        if ((wm->vsc.full > a.full) ||
            ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
            (wm->vtaps >= 5) ||
            ((wm->vsc.full >= a.full) && wm->interlaced))
                max_src_lines_per_dst_line = 4;
        else
                max_src_lines_per_dst_line = 2;

        a.full = dfixed_const(available_bandwidth);
        b.full = dfixed_const(wm->num_heads);
        a.full = dfixed_div(a, b);
        tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
        tmp = min(dfixed_trunc(a), tmp);

        lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

        a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
        b.full = dfixed_const(1000);
        c.full = dfixed_const(lb_fill_bw);
        b.full = dfixed_div(c, b);
        a.full = dfixed_div(a, b);
        line_fill_time = dfixed_trunc(a);

        if (line_fill_time < wm->active_time)
                return latency;
        else
                return latency + (line_fill_time - wm->active_time);
}
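
/* Illustrative numbers: at ~5600 MBytes/s of available bandwidth, a
 * worst-case 4 KB chunk (512 * 8 bytes) takes 512 * 8 * 1000 / 5600
 * ≈ 731 ns to return, which is what feeds the per-head latency sum above.
 */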
/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
        if (dce_v6_0_average_bandwidth(wm) <=
            (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
        if (dce_v6_0_average_bandwidth(wm) <=
            (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
                return true;
        else
                return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
        u32 lb_partitions = wm->lb_size / wm->src_width;
        u32 line_time = wm->active_time + wm->blank_time;
        u32 latency_tolerant_lines;
        u32 latency_hiding;
        fixed20_12 a;

        a.full = dfixed_const(1);
        if (wm->vsc.full > a.full)
                latency_tolerant_lines = 1;
        else {
                if (lb_partitions <= (wm->vtaps + 1))
                        latency_tolerant_lines = 1;
                else
                        latency_tolerant_lines = 2;
        }

        latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

        if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
                return true;
        else
                return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
                                        struct amdgpu_crtc *amdgpu_crtc,
                                        u32 lb_size, u32 num_heads)
{
        struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
        struct dce6_wm_params wm_low, wm_high;
        u32 dram_channels;
        u32 active_time;
        u32 line_time = 0;
        u32 latency_watermark_a = 0, latency_watermark_b = 0;
        u32 priority_a_mark = 0, priority_b_mark = 0;
        u32 priority_a_cnt = PRIORITY_OFF;
        u32 priority_b_cnt = PRIORITY_OFF;
        u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
        fixed20_12 a, b, c;

        if (amdgpu_crtc->base.enabled && num_heads && mode) {
                active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
                                            (u32)mode->clock);
                line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
                                          (u32)mode->clock);
                line_time = min(line_time, (u32)65535);
                priority_a_cnt = 0;
                priority_b_cnt = 0;

                dram_channels = si_get_number_of_dram_channels(adev);

                /* watermark for high clocks */
                if (adev->pm.dpm_enabled) {
                        wm_high.yclk =
                                amdgpu_dpm_get_mclk(adev, false) * 10;
                        wm_high.sclk =
                                amdgpu_dpm_get_sclk(adev, false) * 10;
                } else {
                        wm_high.yclk = adev->pm.current_mclk * 10;
                        wm_high.sclk = adev->pm.current_sclk * 10;
                }

                wm_high.disp_clk = mode->clock;
                wm_high.src_width = mode->crtc_hdisplay;
                wm_high.active_time = active_time;
                wm_high.blank_time = line_time - wm_high.active_time;
                wm_high.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm_high.interlaced = true;
                wm_high.vsc = amdgpu_crtc->vsc;
                wm_high.vtaps = 1;
                if (amdgpu_crtc->rmx_type != RMX_OFF)
                        wm_high.vtaps = 2;
                wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm_high.lb_size = lb_size;
                wm_high.dram_channels = dram_channels;
                wm_high.num_heads = num_heads;

                if (adev->pm.dpm_enabled) {
                        /* watermark for low clocks */
                        wm_low.yclk =
                                amdgpu_dpm_get_mclk(adev, true) * 10;
                        wm_low.sclk =
                                amdgpu_dpm_get_sclk(adev, true) * 10;
                } else {
                        wm_low.yclk = adev->pm.current_mclk * 10;
                        wm_low.sclk = adev->pm.current_sclk * 10;
                }

                wm_low.disp_clk = mode->clock;
                wm_low.src_width = mode->crtc_hdisplay;
                wm_low.active_time = active_time;
                wm_low.blank_time = line_time - wm_low.active_time;
                wm_low.interlaced = false;
                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                        wm_low.interlaced = true;
                wm_low.vsc = amdgpu_crtc->vsc;
                wm_low.vtaps = 1;
                if (amdgpu_crtc->rmx_type != RMX_OFF)
                        wm_low.vtaps = 2;
                wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
                wm_low.lb_size = lb_size;
                wm_low.dram_channels = dram_channels;
                wm_low.num_heads = num_heads;

                /* set for high clocks */
                latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
                /* set for low clocks */
                latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

                /* possibly force display priority to high */
                /* should really do this at mode validation time... */
                if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
                    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
                    !dce_v6_0_check_latency_hiding(&wm_high) ||
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                        priority_a_cnt |= PRIORITY_ALWAYS_ON;
                        priority_b_cnt |= PRIORITY_ALWAYS_ON;
                }
                if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
                    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
                    !dce_v6_0_check_latency_hiding(&wm_low) ||
                    (adev->mode_info.disp_priority == 2)) {
                        DRM_DEBUG_KMS("force priority to high\n");
                        priority_a_cnt |= PRIORITY_ALWAYS_ON;
                        priority_b_cnt |= PRIORITY_ALWAYS_ON;
                }

                a.full = dfixed_const(1000);
                b.full = dfixed_const(mode->clock);
                b.full = dfixed_div(b, a);
                c.full = dfixed_const(latency_watermark_a);
                c.full = dfixed_mul(c, b);
                c.full = dfixed_mul(c, amdgpu_crtc->hsc);
                c.full = dfixed_div(c, a);
                a.full = dfixed_const(16);
                c.full = dfixed_div(c, a);
                priority_a_mark = dfixed_trunc(c);
                priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

                a.full = dfixed_const(1000);
                b.full = dfixed_const(mode->clock);
                b.full = dfixed_div(b, a);
                c.full = dfixed_const(latency_watermark_b);
                c.full = dfixed_mul(c, b);
                c.full = dfixed_mul(c, amdgpu_crtc->hsc);
                c.full = dfixed_div(c, a);
                a.full = dfixed_const(16);
                c.full = dfixed_div(c, a);
                priority_b_mark = dfixed_trunc(c);
                priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
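
                /* Illustrative numbers: a 4000 ns watermark on a 148500 kHz
                 * mode with hsc = 1 covers 4000 * 148.5 / 1000 = 594 pixels,
                 * so the mark computed above comes out to 594 / 16 = 37
                 * (priority marks are in units of 16 pixels).
                 */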
                lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
        }

        /* select wm A */
        arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
        tmp = arb_control3;
        tmp &= ~LATENCY_WATERMARK_MASK(3);
        tmp |= LATENCY_WATERMARK_MASK(1);
        WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
               ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
                (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
        /* select wm B */
        tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
        tmp &= ~LATENCY_WATERMARK_MASK(3);
        tmp |= LATENCY_WATERMARK_MASK(2);
        WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
        WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
               ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
                (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
        /* restore original selection */
        WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

        /* write the priority marks */
        WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
        WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

        /* save values for DPM */
        amdgpu_crtc->line_time = line_time;
        amdgpu_crtc->wm_high = latency_watermark_a;

        /* Save number of lines the linebuffer leads before the scanout */
        amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
                                       struct amdgpu_crtc *amdgpu_crtc,
                                       struct drm_display_mode *mode,
                                       struct drm_display_mode *other_mode)
{
        u32 tmp, buffer_alloc, i;
        u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
        /*
         * Line Buffer Setup
         * There are 3 line buffers, each one shared by 2 display controllers.
         * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
         * the display controllers. The partitioning is done via one of four
         * preset allocations specified in bits 21:20 (only two of which are
         * used here):
         * 0 - half lb
         * 2 - whole lb, other crtc must be disabled
         */
        /* this can get tricky if we have two large displays on a paired group
         * of crtcs. Ideally for multiple large displays we'd assign them to
         * non-linked crtcs for maximum line buffer allocation.
         */
        if (amdgpu_crtc->base.enabled && mode) {
                if (other_mode) {
                        tmp = 0; /* 1/2 */
                        buffer_alloc = 1;
                } else {
                        tmp = 2; /* whole */
                        buffer_alloc = 2;
                }
        } else {
                tmp = 0;
                buffer_alloc = 0;
        }

        WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
               DC_LB_MEMORY_CONFIG(tmp));

        WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
               (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
                    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
                        break;
                udelay(1);
        }

        if (amdgpu_crtc->base.enabled && mode) {
                switch (tmp) {
                case 0:
                default:
                        return 4096 * 2;
                case 2:
                        return 8192 * 2;
                }
        }

        /* controller not enabled, so no lb used */
        return 0;
}
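
/* Example of the split above: two active displays on a paired crtc group get
 * half of the line buffer each (4096 * 2 entries), while a display whose
 * partner crtc is disabled gets the whole buffer (8192 * 2 entries).
 */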
/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
        struct drm_display_mode *mode0 = NULL;
        struct drm_display_mode *mode1 = NULL;
        u32 num_heads = 0, lb_size;
        int i;

        if (!adev->mode_info.mode_config_initialized)
                return;

        amdgpu_update_display_priority(adev);

        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                if (adev->mode_info.crtcs[i]->base.enabled)
                        num_heads++;
        }
        for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
                mode0 = &adev->mode_info.crtcs[i]->base.mode;
                mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
                lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
                dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
                lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
                dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
        }
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
        int i;
        u32 tmp;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
                                         ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
                if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
                                  PORT_CONNECTIVITY))
                        adev->mode_info.audio.pin[i].connected = false;
                else
                        adev->mode_info.audio.pin[i].connected = true;
        }
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
        int i;

        dce_v6_0_audio_get_connected_pins(adev);

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                if (adev->mode_info.audio.pin[i].connected)
                        return &adev->mode_info.audio.pin[i];
        }
        DRM_ERROR("No connected audio pins found!\n");
        return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

        if (!dig || !dig->afmt || !dig->afmt->pin)
                return;

        WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
               REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
                             dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
                                                struct drm_display_mode *mode)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        int interlace = 0;
        u32 tmp;

        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                interlace = 1;

        if (connector->latency_present[interlace]) {
                tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    VIDEO_LIPSYNC, connector->video_latency[interlace]);
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
        } else {
                tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    VIDEO_LIPSYNC, 0);
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
                                    AUDIO_LIPSYNC, 0);
        }
        WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                           ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        u8 *sadb = NULL;
        int sad_count;
        u32 tmp;

        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
        if (sad_count < 0) {
                DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
                sad_count = 0;
        }

        /* program the speaker allocation */
        tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                                 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                            HDMI_CONNECTION, 0);
        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                            DP_CONNECTION, 0);

        if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                                    DP_CONNECTION, 1);
        else
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                                    HDMI_CONNECTION, 1);

        if (sad_count)
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                                    SPEAKER_ALLOCATION, sadb[0]);
        else
                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
                                    SPEAKER_ALLOCATION, 5); /* stereo */

        WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
                           ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

        kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
        struct amdgpu_device *adev = encoder->dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct drm_connector *connector;
        struct amdgpu_connector *amdgpu_connector = NULL;
        struct cea_sad *sads;
        int i, sad_count;

        static const u16 eld_reg_to_type[][2] = {
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
                { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };

        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder) {
                        amdgpu_connector = to_amdgpu_connector(connector);
                        break;
                }
        }

        if (!amdgpu_connector) {
                DRM_ERROR("Couldn't find encoder's connector\n");
                return;
        }

        sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
        if (sad_count <= 0) {
                DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
                return;
        }

        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 tmp = 0;
                u8 stereo_freqs = 0;
                int max_channels = -1;
                int j;

                for (j = 0; j < sad_count; j++) {
                        struct cea_sad *sad = &sads[j];

                        if (sad->format == eld_reg_to_type[i][1]) {
                                if (sad->channels > max_channels) {
                                        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                                            MAX_CHANNELS, sad->channels);
                                        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                                            DESCRIPTOR_BYTE_2, sad->byte2);
                                        tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                                            SUPPORTED_FREQUENCIES, sad->freq);
                                        max_channels = sad->channels;
                                }

                                if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
                                        stereo_freqs |= sad->freq;
                                else
                                        break;
                        }
                }

                tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                                    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
                WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
        }

        kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
                                  struct amdgpu_audio_pin *pin,
                                  bool enable)
{
        if (!pin)
                return;

        WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
                           enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
        (0x1780 - 0x1780),
        (0x1786 - 0x1780),
        (0x178c - 0x1780),
        (0x1792 - 0x1780),
        (0x1798 - 0x1780),
        (0x179d - 0x1780),
        (0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
        int i;

        if (!amdgpu_audio)
                return 0;

        adev->mode_info.audio.enabled = true;

        switch (adev->asic_type) {
        case CHIP_TAHITI:
        case CHIP_PITCAIRN:
        case CHIP_VERDE:
        default:
                adev->mode_info.audio.num_pins = 6;
                break;
        case CHIP_OLAND:
                adev->mode_info.audio.num_pins = 2;
                break;
        }

        for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
                adev->mode_info.audio.pin[i].channels = -1;
                adev->mode_info.audio.pin[i].rate = -1;
                adev->mode_info.audio.pin[i].bits_per_sample = -1;
                adev->mode_info.audio.pin[i].status_bits = 0;
                adev->mode_info.audio.pin[i].category_code = 0;
                adev->mode_info.audio.pin[i].connected = false;
                adev->mode_info.audio.pin[i].offset = pin_offsets[i];
                adev->mode_info.audio.pin[i].id = i;
                dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
        }

        return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
        int i;

        if (!amdgpu_audio)
                return;

        if (!adev->mode_info.audio.enabled)
                return;

        for (i = 0; i < adev->mode_info.audio.num_pins; i++)
                dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

        adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
        tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
        WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
                                   uint32_t clock, int bpc)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
                            bpc > 8 ? 0 : 1);
        WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
        WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
        tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
        WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
        WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
        tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
        WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
        WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
        tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
        WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}
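
/* The CTS/N pairs programmed above follow the HDMI audio clock regeneration
 * relation 128 * audio_rate = pixel_clock * N / CTS. For example, assuming
 * the common N = 6144 for 48 kHz audio on a 74.25 MHz mode, CTS works out
 * to 74250000 * 6144 / (128 * 48000) = 74250.
 */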
static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
                                             struct drm_display_mode *mode)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        struct hdmi_avi_infoframe frame;
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        uint8_t *payload = buffer + 3;
        uint8_t *header = buffer;
        ssize_t err;
        u32 tmp;

        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false);
        if (err < 0) {
                DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
                return;
        }

        err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (err < 0) {
                DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
                return;
        }

        WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
               payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
        WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
               payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
        WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
               payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
        WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
               payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

        tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
        /* anything other than 0 */
        tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
                            HDMI_AUDIO_INFO_LINE, 2);
        WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
        u32 tmp;

        /*
         * Two dtos: generally use dto0 for hdmi, dto1 for dp.
         * Express [24MHz / target pixel clock] as an exact rational
         * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
         */
        tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
        tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
                            DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
        if (em == ATOM_ENCODER_MODE_HDMI) {
                tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
                                    DCCG_AUDIO_DTO_SEL, 0);
        } else if (ENCODER_MODE_IS_DP(em)) {
                tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
                                    DCCG_AUDIO_DTO_SEL, 1);
        }
        WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
        if (em == ATOM_ENCODER_MODE_HDMI) {
                WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
                WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
        } else if (ENCODER_MODE_IS_DP(em)) {
                WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
                WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
        }
}
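
/* For example, a 148500 kHz HDMI mode programs PHASE = 24000 and
 * MODULE = 148500; 24000 / 148500 of the pixel clock recovers the fixed
 * 24 MHz audio reference described in the comment above.
 */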
static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
        WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
        WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
        WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
        tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
        WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
        WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);

        tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
        tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
        WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

        tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
        tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
        WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
        tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
        WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        if (enable) {
                tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
                WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

                tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
                WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

                tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
                WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
        } else {
                tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
                tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
                WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

                tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
                WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
        }
}

static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
        u32 tmp;

        if (enable) {
                tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
                WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

                tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
                WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);

                tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
                tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
                tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
                tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
                tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
                WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
        } else {
                WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
        }
}

static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
                                  struct drm_display_mode *mode)
{
1677 struct drm_device *dev = encoder->dev; 1678 struct amdgpu_device *adev = dev->dev_private; 1679 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1680 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1681 struct drm_connector *connector; 1682 struct amdgpu_connector *amdgpu_connector = NULL; 1683 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 1684 int bpc = 8; 1685 1686 if (!dig || !dig->afmt) 1687 return; 1688 1689 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 1690 if (connector->encoder == encoder) { 1691 amdgpu_connector = to_amdgpu_connector(connector); 1692 break; 1693 } 1694 } 1695 1696 if (!amdgpu_connector) { 1697 DRM_ERROR("Couldn't find encoder's connector\n"); 1698 return; 1699 } 1700 1701 if (!dig->afmt->enabled) 1702 return; 1703 1704 dig->afmt->pin = dce_v6_0_audio_get_pin(adev); 1705 if (!dig->afmt->pin) 1706 return; 1707 1708 if (encoder->crtc) { 1709 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1710 bpc = amdgpu_crtc->bpc; 1711 } 1712 1713 /* disable audio before setting up hw */ 1714 dce_v6_0_audio_enable(adev, dig->afmt->pin, false); 1715 1716 dce_v6_0_audio_set_mute(encoder, true); 1717 dce_v6_0_audio_write_speaker_allocation(encoder); 1718 dce_v6_0_audio_write_sad_regs(encoder); 1719 dce_v6_0_audio_write_latency_fields(encoder, mode); 1720 if (em == ATOM_ENCODER_MODE_HDMI) { 1721 dce_v6_0_audio_set_dto(encoder, mode->clock); 1722 dce_v6_0_audio_set_vbi_packet(encoder); 1723 dce_v6_0_audio_set_acr(encoder, mode->clock, bpc); 1724 } else if (ENCODER_MODE_IS_DP(em)) { 1725 dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10); 1726 } 1727 dce_v6_0_audio_set_packet(encoder); 1728 dce_v6_0_audio_select_pin(encoder); 1729 dce_v6_0_audio_set_avi_infoframe(encoder, mode); 1730 dce_v6_0_audio_set_mute(encoder, false); 1731 if (em == ATOM_ENCODER_MODE_HDMI) { 1732 dce_v6_0_audio_hdmi_enable(encoder, 1); 1733 } else if (ENCODER_MODE_IS_DP(em)) { 1734 dce_v6_0_audio_dp_enable(encoder, 1); 1735 } 1736 1737 /* enable audio after setting up hw */ 1738 dce_v6_0_audio_enable(adev, dig->afmt->pin, true); 1739 } 1740 1741 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1742 { 1743 struct drm_device *dev = encoder->dev; 1744 struct amdgpu_device *adev = dev->dev_private; 1745 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1746 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1747 1748 if (!dig || !dig->afmt) 1749 return; 1750 1751 /* Silent, r600_hdmi_enable will raise WARN for us */ 1752 if (enable && dig->afmt->enabled) 1753 return; 1754 1755 if (!enable && !dig->afmt->enabled) 1756 return; 1757 1758 if (!enable && dig->afmt->pin) { 1759 dce_v6_0_audio_enable(adev, dig->afmt->pin, false); 1760 dig->afmt->pin = NULL; 1761 } 1762 1763 dig->afmt->enabled = enable; 1764 1765 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1766 enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1767 } 1768 1769 static int dce_v6_0_afmt_init(struct amdgpu_device *adev) 1770 { 1771 int i, j; 1772 1773 for (i = 0; i < adev->mode_info.num_dig; i++) 1774 adev->mode_info.afmt[i] = NULL; 1775 1776 /* DCE6 has audio blocks tied to DIG encoders */ 1777 for (i = 0; i < adev->mode_info.num_dig; i++) { 1778 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1779 if (adev->mode_info.afmt[i]) { 1780 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1781 adev->mode_info.afmt[i]->id = i; 1782 } else { 1783 for (j = 0; j < i; j++) { 1784 kfree(adev->mode_info.afmt[j]); 1785 adev->mode_info.afmt[j] = NULL; 1786 } 1787 DRM_ERROR("Out of memory allocating afmt table\n"); 1788 return -ENOMEM; 1789 } 1790 } 1791 return 0; 1792 } 1793 1794 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev) 1795 { 1796 int i; 1797 1798 for (i = 0; i < adev->mode_info.num_dig; i++) { 1799 kfree(adev->mode_info.afmt[i]); 1800 adev->mode_info.afmt[i] = NULL; 1801 } 1802 } 1803 1804 static const u32 vga_control_regs[6] = 1805 { 1806 mmD1VGA_CONTROL, 1807 mmD2VGA_CONTROL, 1808 mmD3VGA_CONTROL, 1809 mmD4VGA_CONTROL, 1810 mmD5VGA_CONTROL, 1811 mmD6VGA_CONTROL, 1812 }; 1813 1814 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable) 1815 { 1816 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1817 struct drm_device *dev = crtc->dev; 1818 struct amdgpu_device *adev = dev->dev_private; 1819 u32 vga_control; 1820 1821 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1822 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0)); 1823 } 1824 1825 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable) 1826 { 1827 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1828 struct drm_device *dev = crtc->dev; 1829 struct amdgpu_device *adev = dev->dev_private; 1830 1831 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 
1 : 0); 1832 } 1833 1834 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, 1835 struct drm_framebuffer *fb, 1836 int x, int y, int atomic) 1837 { 1838 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1839 struct drm_device *dev = crtc->dev; 1840 struct amdgpu_device *adev = dev->dev_private; 1841 struct amdgpu_framebuffer *amdgpu_fb; 1842 struct drm_framebuffer *target_fb; 1843 struct drm_gem_object *obj; 1844 struct amdgpu_bo *abo; 1845 uint64_t fb_location, tiling_flags; 1846 uint32_t fb_format, fb_pitch_pixels, pipe_config; 1847 u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE); 1848 u32 viewport_w, viewport_h; 1849 int r; 1850 bool bypass_lut = false; 1851 struct drm_format_name_buf format_name; 1852 1853 /* no fb bound */ 1854 if (!atomic && !crtc->primary->fb) { 1855 DRM_DEBUG_KMS("No FB bound\n"); 1856 return 0; 1857 } 1858 1859 if (atomic) { 1860 amdgpu_fb = to_amdgpu_framebuffer(fb); 1861 target_fb = fb; 1862 } else { 1863 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 1864 target_fb = crtc->primary->fb; 1865 } 1866 1867 /* If atomic, assume fb object is pinned & idle & fenced and 1868 * just update base pointers 1869 */ 1870 obj = amdgpu_fb->obj; 1871 abo = gem_to_amdgpu_bo(obj); 1872 r = amdgpu_bo_reserve(abo, false); 1873 if (unlikely(r != 0)) 1874 return r; 1875 1876 if (atomic) { 1877 fb_location = amdgpu_bo_gpu_offset(abo); 1878 } else { 1879 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); 1880 if (unlikely(r != 0)) { 1881 amdgpu_bo_unreserve(abo); 1882 return -EINVAL; 1883 } 1884 } 1885 1886 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1887 amdgpu_bo_unreserve(abo); 1888 1889 switch (target_fb->format->format) { 1890 case DRM_FORMAT_C8: 1891 fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) | 1892 GRPH_FORMAT(GRPH_FORMAT_INDEXED)); 1893 break; 1894 case DRM_FORMAT_XRGB4444: 1895 case DRM_FORMAT_ARGB4444: 1896 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1897 GRPH_FORMAT(GRPH_FORMAT_ARGB4444)); 1898 #ifdef __BIG_ENDIAN 1899 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1900 #endif 1901 break; 1902 case DRM_FORMAT_XRGB1555: 1903 case DRM_FORMAT_ARGB1555: 1904 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1905 GRPH_FORMAT(GRPH_FORMAT_ARGB1555)); 1906 #ifdef __BIG_ENDIAN 1907 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1908 #endif 1909 break; 1910 case DRM_FORMAT_BGRX5551: 1911 case DRM_FORMAT_BGRA5551: 1912 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1913 GRPH_FORMAT(GRPH_FORMAT_BGRA5551)); 1914 #ifdef __BIG_ENDIAN 1915 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1916 #endif 1917 break; 1918 case DRM_FORMAT_RGB565: 1919 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1920 GRPH_FORMAT(GRPH_FORMAT_ARGB565)); 1921 #ifdef __BIG_ENDIAN 1922 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1923 #endif 1924 break; 1925 case DRM_FORMAT_XRGB8888: 1926 case DRM_FORMAT_ARGB8888: 1927 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1928 GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); 1929 #ifdef __BIG_ENDIAN 1930 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1931 #endif 1932 break; 1933 case DRM_FORMAT_XRGB2101010: 1934 case DRM_FORMAT_ARGB2101010: 1935 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1936 GRPH_FORMAT(GRPH_FORMAT_ARGB2101010)); 1937 #ifdef __BIG_ENDIAN 1938 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1939 #endif 1940 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1941 bypass_lut = true; 1942 break; 1943 case DRM_FORMAT_BGRX1010102: 1944 case DRM_FORMAT_BGRA1010102: 1945 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1946 
GRPH_FORMAT(GRPH_FORMAT_BGRA1010102)); 1947 #ifdef __BIG_ENDIAN 1948 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1949 #endif 1950 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1951 bypass_lut = true; 1952 break; 1953 default: 1954 DRM_ERROR("Unsupported screen format %s\n", 1955 drm_get_format_name(target_fb->format->format, &format_name)); 1956 return -EINVAL; 1957 } 1958 1959 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 1960 unsigned bankw, bankh, mtaspect, tile_split, num_banks; 1961 1962 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 1963 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 1964 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 1965 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 1966 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 1967 1968 fb_format |= GRPH_NUM_BANKS(num_banks); 1969 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1); 1970 fb_format |= GRPH_TILE_SPLIT(tile_split); 1971 fb_format |= GRPH_BANK_WIDTH(bankw); 1972 fb_format |= GRPH_BANK_HEIGHT(bankh); 1973 fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect); 1974 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 1975 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1); 1976 } 1977 1978 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1979 fb_format |= GRPH_PIPE_CONFIG(pipe_config); 1980 1981 dce_v6_0_vga_enable(crtc, false); 1982 1983 /* Make sure surface address is updated at vertical blank rather than 1984 * horizontal blank 1985 */ 1986 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0); 1987 1988 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1989 upper_32_bits(fb_location)); 1990 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1991 upper_32_bits(fb_location)); 1992 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1993 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1994 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1995 (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1996 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 1997 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 1998 1999 /* 2000 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT 2001 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2002 * retain the full precision throughout the pipeline. 2003 */ 2004 WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, 2005 (bypass_lut ?
GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0), 2006 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK); 2007 2008 if (bypass_lut) 2009 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2010 2011 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2012 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2013 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2014 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2015 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2016 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2017 2018 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; 2019 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2020 2021 dce_v6_0_grph_enable(crtc, true); 2022 2023 WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2024 target_fb->height); 2025 x &= ~3; 2026 y &= ~1; 2027 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2028 (x << 16) | y); 2029 viewport_w = crtc->mode.hdisplay; 2030 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2031 2032 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2033 (viewport_w << 16) | viewport_h); 2034 2035 /* set pageflip to happen anywhere in vblank interval */ 2036 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 2037 2038 if (!atomic && fb && fb != crtc->primary->fb) { 2039 amdgpu_fb = to_amdgpu_framebuffer(fb); 2040 abo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2041 r = amdgpu_bo_reserve(abo, true); 2042 if (unlikely(r != 0)) 2043 return r; 2044 amdgpu_bo_unpin(abo); 2045 amdgpu_bo_unreserve(abo); 2046 } 2047 2048 /* Bytes per pixel may have changed */ 2049 dce_v6_0_bandwidth_update(adev); 2050 2051 return 0; 2052 2053 } 2054 2055 static void dce_v6_0_set_interleave(struct drm_crtc *crtc, 2056 struct drm_display_mode *mode) 2057 { 2058 struct drm_device *dev = crtc->dev; 2059 struct amdgpu_device *adev = dev->dev_private; 2060 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2061 2062 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2063 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 2064 INTERLEAVE_EN); 2065 else 2066 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 2067 } 2068 2069 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) 2070 { 2071 2072 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2073 struct drm_device *dev = crtc->dev; 2074 struct amdgpu_device *adev = dev->dev_private; 2075 u16 *r, *g, *b; 2076 int i; 2077 2078 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2079 2080 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2081 ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 2082 (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 2083 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 2084 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 2085 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 2086 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 2087 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2088 ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 2089 (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 2090 2091 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2092 2093 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2094 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2095 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2096 2097 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2098 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN 
+ amdgpu_crtc->crtc_offset, 0xffff); 2099 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2100 2101 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2102 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2103 2104 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2105 r = crtc->gamma_store; 2106 g = r + crtc->gamma_size; 2107 b = g + crtc->gamma_size; 2108 for (i = 0; i < 256; i++) { 2109 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2110 ((*r++ & 0xffc0) << 14) | 2111 ((*g++ & 0xffc0) << 4) | 2112 (*b++ >> 6)); 2113 } 2114 2115 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2116 ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 2117 (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 2118 ICON_DEGAMMA_MODE(0) | 2119 (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 2120 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 2121 ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 2122 (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 2123 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2124 ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 2125 (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 2126 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2127 ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 2128 (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 2129 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 2130 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 2131 2132 2133 } 2134 2135 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder) 2136 { 2137 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2138 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2139 2140 switch (amdgpu_encoder->encoder_id) { 2141 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2142 return dig->linkb ? 1 : 0; 2143 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2144 return dig->linkb ? 3 : 2; 2145 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2146 return dig->linkb ? 5 : 4; 2147 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2148 return 6; 2149 default: 2150 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2151 return 0; 2152 } 2153 } 2154 2155 /** 2156 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc. 2157 * 2158 * @crtc: drm crtc 2159 * 2160 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2161 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2162 * monitors a dedicated PPLL must be used. If a particular board has 2163 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2164 * as there is no need to program the PLL itself. If we are not able to 2165 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2166 * avoid messing up an existing monitor. 
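 * Worked example of the allocation below: a DP monitor with no external
 * DP clock always lands on ATOM_PPLL0; the first non-DP monitor then
 * takes ATOM_PPLL2 (or ATOM_PPLL1 if PPLL2 is already in use), and any
 * further non-DP monitor with the same pixel clock shares that PPLL via
 * amdgpu_pll_get_shared_nondp_ppll().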
2167 * 2168 * 2169 */ 2170 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc) 2171 { 2172 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2173 struct drm_device *dev = crtc->dev; 2174 struct amdgpu_device *adev = dev->dev_private; 2175 u32 pll_in_use; 2176 int pll; 2177 2178 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2179 if (adev->clock.dp_extclk) 2180 /* skip PPLL programming if using ext clock */ 2181 return ATOM_PPLL_INVALID; 2182 else 2183 return ATOM_PPLL0; 2184 } else { 2185 /* use the same PPLL for all monitors with the same clock */ 2186 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2187 if (pll != ATOM_PPLL_INVALID) 2188 return pll; 2189 } 2190 2191 /* PPLL1 and PPLL2 */ 2192 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2193 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2194 return ATOM_PPLL2; 2195 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2196 return ATOM_PPLL1; 2197 DRM_ERROR("unable to allocate a PPLL\n"); 2198 return ATOM_PPLL_INVALID; 2199 } 2200 2201 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2202 { 2203 struct amdgpu_device *adev = crtc->dev->dev_private; 2204 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2205 uint32_t cur_lock; 2206 2207 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2208 if (lock) 2209 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2210 else 2211 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2212 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2213 } 2214 2215 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc) 2216 { 2217 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2218 struct amdgpu_device *adev = crtc->dev->dev_private; 2219 2220 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2221 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2222 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2223 2224 2225 } 2226 2227 static void dce_v6_0_show_cursor(struct drm_crtc *crtc) 2228 { 2229 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2230 struct amdgpu_device *adev = crtc->dev->dev_private; 2231 2232 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2233 upper_32_bits(amdgpu_crtc->cursor_addr)); 2234 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2235 lower_32_bits(amdgpu_crtc->cursor_addr)); 2236 2237 WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2238 CUR_CONTROL__CURSOR_EN_MASK | 2239 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2240 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2241 2242 } 2243 2244 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc, 2245 int x, int y) 2246 { 2247 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2248 struct amdgpu_device *adev = crtc->dev->dev_private; 2249 int xorigin = 0, yorigin = 0; 2250 2251 int w = amdgpu_crtc->cursor_width; 2252 2253 amdgpu_crtc->cursor_x = x; 2254 amdgpu_crtc->cursor_y = y; 2255 2256 /* avivo cursors are offset into the total surface */ 2257 x += crtc->x; 2258 y += crtc->y; 2259 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2260 2261 if (x < 0) { 2262 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2263 x = 0; 2264 } 2265 if (y < 0) { 2266 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2267 y = 0; 2268 } 2269 2270 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2271 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2272 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2273 ((w - 1)
<< 16) | (amdgpu_crtc->cursor_height - 1)); 2274 2275 return 0; 2276 } 2277 2278 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc, 2279 int x, int y) 2280 { 2281 int ret; 2282 2283 dce_v6_0_lock_cursor(crtc, true); 2284 ret = dce_v6_0_cursor_move_locked(crtc, x, y); 2285 dce_v6_0_lock_cursor(crtc, false); 2286 2287 return ret; 2288 } 2289 2290 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, 2291 struct drm_file *file_priv, 2292 uint32_t handle, 2293 uint32_t width, 2294 uint32_t height, 2295 int32_t hot_x, 2296 int32_t hot_y) 2297 { 2298 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2299 struct drm_gem_object *obj; 2300 struct amdgpu_bo *aobj; 2301 int ret; 2302 2303 if (!handle) { 2304 /* turn off cursor */ 2305 dce_v6_0_hide_cursor(crtc); 2306 obj = NULL; 2307 goto unpin; 2308 } 2309 2310 if ((width > amdgpu_crtc->max_cursor_width) || 2311 (height > amdgpu_crtc->max_cursor_height)) { 2312 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2313 return -EINVAL; 2314 } 2315 2316 obj = drm_gem_object_lookup(file_priv, handle); 2317 if (!obj) { 2318 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2319 return -ENOENT; 2320 } 2321 2322 aobj = gem_to_amdgpu_bo(obj); 2323 ret = amdgpu_bo_reserve(aobj, false); 2324 if (ret != 0) { 2325 drm_gem_object_put_unlocked(obj); 2326 return ret; 2327 } 2328 2329 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr); 2330 amdgpu_bo_unreserve(aobj); 2331 if (ret) { 2332 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2333 drm_gem_object_put_unlocked(obj); 2334 return ret; 2335 } 2336 2337 dce_v6_0_lock_cursor(crtc, true); 2338 2339 if (width != amdgpu_crtc->cursor_width || 2340 height != amdgpu_crtc->cursor_height || 2341 hot_x != amdgpu_crtc->cursor_hot_x || 2342 hot_y != amdgpu_crtc->cursor_hot_y) { 2343 int x, y; 2344 2345 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2346 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2347 2348 dce_v6_0_cursor_move_locked(crtc, x, y); 2349 2350 amdgpu_crtc->cursor_width = width; 2351 amdgpu_crtc->cursor_height = height; 2352 amdgpu_crtc->cursor_hot_x = hot_x; 2353 amdgpu_crtc->cursor_hot_y = hot_y; 2354 } 2355 2356 dce_v6_0_show_cursor(crtc); 2357 dce_v6_0_lock_cursor(crtc, false); 2358 2359 unpin: 2360 if (amdgpu_crtc->cursor_bo) { 2361 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2362 ret = amdgpu_bo_reserve(aobj, true); 2363 if (likely(ret == 0)) { 2364 amdgpu_bo_unpin(aobj); 2365 amdgpu_bo_unreserve(aobj); 2366 } 2367 drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); 2368 } 2369 2370 amdgpu_crtc->cursor_bo = obj; 2371 return 0; 2372 } 2373 2374 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 2375 { 2376 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2377 2378 if (amdgpu_crtc->cursor_bo) { 2379 dce_v6_0_lock_cursor(crtc, true); 2380 2381 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2382 amdgpu_crtc->cursor_y); 2383 2384 dce_v6_0_show_cursor(crtc); 2385 dce_v6_0_lock_cursor(crtc, false); 2386 } 2387 } 2388 2389 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2390 u16 *blue, uint32_t size, 2391 struct drm_modeset_acquire_ctx *ctx) 2392 { 2393 dce_v6_0_crtc_load_lut(crtc); 2394 2395 return 0; 2396 } 2397 2398 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc) 2399 { 2400 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2401 2402 drm_crtc_cleanup(crtc); 2403 kfree(amdgpu_crtc); 
2404 } 2405 2406 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = { 2407 .cursor_set2 = dce_v6_0_crtc_cursor_set2, 2408 .cursor_move = dce_v6_0_crtc_cursor_move, 2409 .gamma_set = dce_v6_0_crtc_gamma_set, 2410 .set_config = amdgpu_crtc_set_config, 2411 .destroy = dce_v6_0_crtc_destroy, 2412 .page_flip_target = amdgpu_crtc_page_flip_target, 2413 }; 2414 2415 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2416 { 2417 struct drm_device *dev = crtc->dev; 2418 struct amdgpu_device *adev = dev->dev_private; 2419 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2420 unsigned type; 2421 2422 switch (mode) { 2423 case DRM_MODE_DPMS_ON: 2424 amdgpu_crtc->enabled = true; 2425 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2426 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2427 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2428 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2429 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2430 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2431 drm_crtc_vblank_on(crtc); 2432 dce_v6_0_crtc_load_lut(crtc); 2433 break; 2434 case DRM_MODE_DPMS_STANDBY: 2435 case DRM_MODE_DPMS_SUSPEND: 2436 case DRM_MODE_DPMS_OFF: 2437 drm_crtc_vblank_off(crtc); 2438 if (amdgpu_crtc->enabled) 2439 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2440 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2441 amdgpu_crtc->enabled = false; 2442 break; 2443 } 2444 /* adjust pm to dpms */ 2445 amdgpu_pm_compute_clocks(adev); 2446 } 2447 2448 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc) 2449 { 2450 /* disable crtc pair power gating before programming */ 2451 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2452 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2453 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2454 } 2455 2456 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc) 2457 { 2458 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2459 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2460 } 2461 2462 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc) 2463 { 2464 2465 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2466 struct drm_device *dev = crtc->dev; 2467 struct amdgpu_device *adev = dev->dev_private; 2468 struct amdgpu_atom_ss ss; 2469 int i; 2470 2471 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2472 if (crtc->primary->fb) { 2473 int r; 2474 struct amdgpu_framebuffer *amdgpu_fb; 2475 struct amdgpu_bo *abo; 2476 2477 amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); 2478 abo = gem_to_amdgpu_bo(amdgpu_fb->obj); 2479 r = amdgpu_bo_reserve(abo, true); 2480 if (unlikely(r)) 2481 DRM_ERROR("failed to reserve abo before unpin\n"); 2482 else { 2483 amdgpu_bo_unpin(abo); 2484 amdgpu_bo_unreserve(abo); 2485 } 2486 } 2487 /* disable the GRPH */ 2488 dce_v6_0_grph_enable(crtc, false); 2489 2490 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2491 2492 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2493 if (adev->mode_info.crtcs[i] && 2494 adev->mode_info.crtcs[i]->enabled && 2495 i != amdgpu_crtc->crtc_id && 2496 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2497 /* one other crtc is using this pll don't turn 2498 * off the pll 2499 */ 2500 goto done; 2501 } 2502 } 2503 2504 switch (amdgpu_crtc->pll_id) { 2505 case ATOM_PPLL1: 2506 case ATOM_PPLL2: 2507 /* disable the ppll */ 2508 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2509 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2510 break; 2511 default: 2512 break; 2513 } 2514 done: 2515 
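/* reached via goto when another enabled crtc still uses this PLL (it has
 * to stay running), or by falling through once the PLL has been disabled
 * above; in both cases only the software state below is reset
 */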
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2516 amdgpu_crtc->adjusted_clock = 0; 2517 amdgpu_crtc->encoder = NULL; 2518 amdgpu_crtc->connector = NULL; 2519 } 2520 2521 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc, 2522 struct drm_display_mode *mode, 2523 struct drm_display_mode *adjusted_mode, 2524 int x, int y, struct drm_framebuffer *old_fb) 2525 { 2526 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2527 2528 if (!amdgpu_crtc->adjusted_clock) 2529 return -EINVAL; 2530 2531 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2532 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2533 dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2534 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2535 amdgpu_atombios_crtc_scaler_setup(crtc); 2536 dce_v6_0_cursor_reset(crtc); 2537 /* update the hw version for dpm */ 2538 amdgpu_crtc->hw_mode = *adjusted_mode; 2539 2540 return 0; 2541 } 2542 2543 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc, 2544 const struct drm_display_mode *mode, 2545 struct drm_display_mode *adjusted_mode) 2546 { 2547 2548 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2549 struct drm_device *dev = crtc->dev; 2550 struct drm_encoder *encoder; 2551 2552 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2554 if (encoder->crtc == crtc) { 2555 amdgpu_crtc->encoder = encoder; 2556 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2557 break; 2558 } 2559 } 2560 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2561 amdgpu_crtc->encoder = NULL; 2562 amdgpu_crtc->connector = NULL; 2563 return false; 2564 } 2565 if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2566 return false; 2567 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2568 return false; 2569 /* pick pll */ 2570 amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc); 2571 /* if we can't get a PPLL for a non-DP encoder, fail */ 2572 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2573 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2574 return false; 2575 2576 return true; 2577 } 2578 2579 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2580 struct drm_framebuffer *old_fb) 2581 { 2582 return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2583 } 2584 2585 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2586 struct drm_framebuffer *fb, 2587 int x, int y, enum mode_set_atomic state) 2588 { 2589 return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1); 2590 } 2591 2592 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = { 2593 .dpms = dce_v6_0_crtc_dpms, 2594 .mode_fixup = dce_v6_0_crtc_mode_fixup, 2595 .mode_set = dce_v6_0_crtc_mode_set, 2596 .mode_set_base = dce_v6_0_crtc_set_base, 2597 .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic, 2598 .prepare = dce_v6_0_crtc_prepare, 2599 .commit = dce_v6_0_crtc_commit, 2600 .disable = dce_v6_0_crtc_disable, 2601 }; 2602 2603 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index) 2604 { 2605 struct amdgpu_crtc *amdgpu_crtc; 2606 2607 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2608 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2609 if (amdgpu_crtc == NULL) 2610 return -ENOMEM; 2611 2612 drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs); 2613 2614 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2615
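/* the 256 gamma entries registered here match the 256-slot hardware LUT
 * that dce_v6_0_crtc_load_lut() fills from crtc->gamma_store
 */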
amdgpu_crtc->crtc_id = index; 2616 adev->mode_info.crtcs[index] = amdgpu_crtc; 2617 2618 amdgpu_crtc->max_cursor_width = CURSOR_WIDTH; 2619 amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT; 2620 adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2621 adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2622 2623 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; 2624 2625 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2626 amdgpu_crtc->adjusted_clock = 0; 2627 amdgpu_crtc->encoder = NULL; 2628 amdgpu_crtc->connector = NULL; 2629 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs); 2630 2631 return 0; 2632 } 2633 2634 static int dce_v6_0_early_init(void *handle) 2635 { 2636 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2637 2638 adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg; 2639 adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg; 2640 2641 dce_v6_0_set_display_funcs(adev); 2642 dce_v6_0_set_irq_funcs(adev); 2643 2644 adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev); 2645 2646 switch (adev->asic_type) { 2647 case CHIP_TAHITI: 2648 case CHIP_PITCAIRN: 2649 case CHIP_VERDE: 2650 adev->mode_info.num_hpd = 6; 2651 adev->mode_info.num_dig = 6; 2652 break; 2653 case CHIP_OLAND: 2654 adev->mode_info.num_hpd = 2; 2655 adev->mode_info.num_dig = 2; 2656 break; 2657 default: 2658 return -EINVAL; 2659 } 2660 2661 return 0; 2662 } 2663 2664 static int dce_v6_0_sw_init(void *handle) 2665 { 2666 int r, i; 2667 bool ret; 2668 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2669 2670 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2671 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); 2672 if (r) 2673 return r; 2674 } 2675 2676 for (i = 8; i < 20; i += 2) { 2677 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2678 if (r) 2679 return r; 2680 } 2681 2682 /* HPD hotplug */ 2683 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2684 if (r) 2685 return r; 2686 2687 adev->mode_info.mode_config_initialized = true; 2688 2689 adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; 2690 adev->ddev->mode_config.async_page_flip = true; 2691 adev->ddev->mode_config.max_width = 16384; 2692 adev->ddev->mode_config.max_height = 16384; 2693 adev->ddev->mode_config.preferred_depth = 24; 2694 adev->ddev->mode_config.prefer_shadow = 1; 2695 adev->ddev->mode_config.fb_base = adev->mc.aper_base; 2696 2697 r = amdgpu_modeset_create_props(adev); 2698 if (r) 2699 return r; 2700 2701 adev->ddev->mode_config.max_width = 16384; 2702 adev->ddev->mode_config.max_height = 16384; 2703 2704 /* allocate crtcs */ 2705 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2706 r = dce_v6_0_crtc_init(adev, i); 2707 if (r) 2708 return r; 2709 } 2710 2711 ret = amdgpu_atombios_get_connector_info_from_object_table(adev); 2712 if (ret) 2713 amdgpu_print_display_setup(adev->ddev); 2714 else 2715 return -EINVAL; 2716 2717 /* setup afmt */ 2718 r = dce_v6_0_afmt_init(adev); 2719 if (r) 2720 return r; 2721 2722 r = dce_v6_0_audio_init(adev); 2723 if (r) 2724 return r; 2725 2726 drm_kms_helper_poll_init(adev->ddev); 2727 2728 return r; 2729 } 2730 2731 static int dce_v6_0_sw_fini(void *handle) 2732 { 2733 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2734 2735 kfree(adev->mode_info.bios_hardcoded_edid); 2736 2737 drm_kms_helper_poll_fini(adev->ddev); 2738 2739 dce_v6_0_audio_fini(adev); 2740 dce_v6_0_afmt_fini(adev); 2741 2742 
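/* tear down in roughly the reverse order of dce_v6_0_sw_init(): audio
 * and AFMT state above, then the mode config the crtcs, encoders and
 * connectors were registered with
 */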
drm_mode_config_cleanup(adev->ddev); 2743 adev->mode_info.mode_config_initialized = false; 2744 2745 return 0; 2746 } 2747 2748 static int dce_v6_0_hw_init(void *handle) 2749 { 2750 int i; 2751 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2752 2753 /* disable vga render */ 2754 dce_v6_0_set_vga_render_state(adev, false); 2755 /* init dig PHYs, disp eng pll */ 2756 amdgpu_atombios_encoder_init_dig(adev); 2757 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2758 2759 /* initialize hpd */ 2760 dce_v6_0_hpd_init(adev); 2761 2762 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2763 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2764 } 2765 2766 dce_v6_0_pageflip_interrupt_init(adev); 2767 2768 return 0; 2769 } 2770 2771 static int dce_v6_0_hw_fini(void *handle) 2772 { 2773 int i; 2774 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2775 2776 dce_v6_0_hpd_fini(adev); 2777 2778 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2779 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2780 } 2781 2782 dce_v6_0_pageflip_interrupt_fini(adev); 2783 2784 return 0; 2785 } 2786 2787 static int dce_v6_0_suspend(void *handle) 2788 { 2789 return dce_v6_0_hw_fini(handle); 2790 } 2791 2792 static int dce_v6_0_resume(void *handle) 2793 { 2794 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2795 int ret; 2796 2797 ret = dce_v6_0_hw_init(handle); 2798 2799 /* turn on the BL */ 2800 if (adev->mode_info.bl_encoder) { 2801 u8 bl_level = amdgpu_display_backlight_get_level(adev, 2802 adev->mode_info.bl_encoder); 2803 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2804 bl_level); 2805 } 2806 2807 return ret; 2808 } 2809 2810 static bool dce_v6_0_is_idle(void *handle) 2811 { 2812 return true; 2813 } 2814 2815 static int dce_v6_0_wait_for_idle(void *handle) 2816 { 2817 return 0; 2818 } 2819 2820 static int dce_v6_0_soft_reset(void *handle) 2821 { 2822 DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n"); 2823 return 0; 2824 } 2825 2826 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 2827 int crtc, 2828 enum amdgpu_interrupt_state state) 2829 { 2830 u32 reg_block, interrupt_mask; 2831 2832 if (crtc >= adev->mode_info.num_crtc) { 2833 DRM_DEBUG("invalid crtc %d\n", crtc); 2834 return; 2835 } 2836 2837 switch (crtc) { 2838 case 0: 2839 reg_block = SI_CRTC0_REGISTER_OFFSET; 2840 break; 2841 case 1: 2842 reg_block = SI_CRTC1_REGISTER_OFFSET; 2843 break; 2844 case 2: 2845 reg_block = SI_CRTC2_REGISTER_OFFSET; 2846 break; 2847 case 3: 2848 reg_block = SI_CRTC3_REGISTER_OFFSET; 2849 break; 2850 case 4: 2851 reg_block = SI_CRTC4_REGISTER_OFFSET; 2852 break; 2853 case 5: 2854 reg_block = SI_CRTC5_REGISTER_OFFSET; 2855 break; 2856 default: 2857 DRM_DEBUG("invalid crtc %d\n", crtc); 2858 return; 2859 } 2860 2861 switch (state) { 2862 case AMDGPU_IRQ_STATE_DISABLE: 2863 interrupt_mask = RREG32(mmINT_MASK + reg_block); 2864 interrupt_mask &= ~VBLANK_INT_MASK; 2865 WREG32(mmINT_MASK + reg_block, interrupt_mask); 2866 break; 2867 case AMDGPU_IRQ_STATE_ENABLE: 2868 interrupt_mask = RREG32(mmINT_MASK + reg_block); 2869 interrupt_mask |= VBLANK_INT_MASK; 2870 WREG32(mmINT_MASK + reg_block, interrupt_mask); 2871 break; 2872 default: 2873 break; 2874 } 2875 } 2876 2877 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 2878 int crtc, 2879 enum amdgpu_interrupt_state state) 2880 { 2881 2882 } 2883 2884 static int 
dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev, 2885 struct amdgpu_irq_src *src, 2886 unsigned type, 2887 enum amdgpu_interrupt_state state) 2888 { 2889 u32 dc_hpd_int_cntl; 2890 2891 if (type >= adev->mode_info.num_hpd) { 2892 DRM_DEBUG("invalid hpd %d\n", type); 2893 return 0; 2894 } 2895 2896 switch (state) { 2897 case AMDGPU_IRQ_STATE_DISABLE: 2898 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); 2899 dc_hpd_int_cntl &= ~DC_HPDx_INT_EN; 2900 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2901 break; 2902 case AMDGPU_IRQ_STATE_ENABLE: 2903 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); 2904 dc_hpd_int_cntl |= DC_HPDx_INT_EN; 2905 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2906 break; 2907 default: 2908 break; 2909 } 2910 2911 return 0; 2912 } 2913 2914 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev, 2915 struct amdgpu_irq_src *src, 2916 unsigned type, 2917 enum amdgpu_interrupt_state state) 2918 { 2919 switch (type) { 2920 case AMDGPU_CRTC_IRQ_VBLANK1: 2921 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state); 2922 break; 2923 case AMDGPU_CRTC_IRQ_VBLANK2: 2924 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state); 2925 break; 2926 case AMDGPU_CRTC_IRQ_VBLANK3: 2927 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state); 2928 break; 2929 case AMDGPU_CRTC_IRQ_VBLANK4: 2930 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state); 2931 break; 2932 case AMDGPU_CRTC_IRQ_VBLANK5: 2933 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state); 2934 break; 2935 case AMDGPU_CRTC_IRQ_VBLANK6: 2936 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state); 2937 break; 2938 case AMDGPU_CRTC_IRQ_VLINE1: 2939 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state); 2940 break; 2941 case AMDGPU_CRTC_IRQ_VLINE2: 2942 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state); 2943 break; 2944 case AMDGPU_CRTC_IRQ_VLINE3: 2945 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state); 2946 break; 2947 case AMDGPU_CRTC_IRQ_VLINE4: 2948 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state); 2949 break; 2950 case AMDGPU_CRTC_IRQ_VLINE5: 2951 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state); 2952 break; 2953 case AMDGPU_CRTC_IRQ_VLINE6: 2954 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state); 2955 break; 2956 default: 2957 break; 2958 } 2959 return 0; 2960 } 2961 2962 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev, 2963 struct amdgpu_irq_src *source, 2964 struct amdgpu_iv_entry *entry) 2965 { 2966 unsigned crtc = entry->src_id - 1; 2967 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 2968 unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); 2969 2970 switch (entry->src_data[0]) { 2971 case 0: /* vblank */ 2972 if (disp_int & interrupt_status_offsets[crtc].vblank) 2973 WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK); 2974 else 2975 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 2976 2977 if (amdgpu_irq_enabled(adev, source, irq_type)) { 2978 drm_handle_vblank(adev->ddev, crtc); 2979 } 2980 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 2981 break; 2982 case 1: /* vline */ 2983 if (disp_int & interrupt_status_offsets[crtc].vline) 2984 WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK); 2985 else 2986 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 2987 2988 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 2989 break; 2990 default: 2991 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 2992 break; 2993 }
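/* the VBLANK_ACK/VLINE_ACK writes above clear the latched status bits in
 * the crtc register block; returning 0 just reports the IV entry as
 * consumed to the interrupt dispatcher
 */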
2994 2995 return 0; 2996 } 2997 2998 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, 2999 struct amdgpu_irq_src *src, 3000 unsigned type, 3001 enum amdgpu_interrupt_state state) 3002 { 3003 u32 reg; 3004 3005 if (type >= adev->mode_info.num_crtc) { 3006 DRM_ERROR("invalid pageflip crtc %d\n", type); 3007 return -EINVAL; 3008 } 3009 3010 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3011 if (state == AMDGPU_IRQ_STATE_DISABLE) 3012 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3013 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3014 else 3015 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3016 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3017 3018 return 0; 3019 } 3020 3021 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev, 3022 struct amdgpu_irq_src *source, 3023 struct amdgpu_iv_entry *entry) 3024 { 3025 unsigned long flags; 3026 unsigned crtc_id; 3027 struct amdgpu_crtc *amdgpu_crtc; 3028 struct amdgpu_flip_work *works; 3029 3030 crtc_id = (entry->src_id - 8) >> 1; 3031 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3032 3033 if (crtc_id >= adev->mode_info.num_crtc) { 3034 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3035 return -EINVAL; 3036 } 3037 3038 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3039 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3040 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3041 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3042 3043 /* IRQ could occur when in initial stage */ 3044 if (amdgpu_crtc == NULL) 3045 return 0; 3046 3047 spin_lock_irqsave(&adev->ddev->event_lock, flags); 3048 works = amdgpu_crtc->pflip_works; 3049 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3050 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3051 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3052 amdgpu_crtc->pflip_status, 3053 AMDGPU_FLIP_SUBMITTED); 3054 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3055 return 0; 3056 } 3057 3058 /* page flip completed. 
clean up */ 3059 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3060 amdgpu_crtc->pflip_works = NULL; 3061 3062 /* wake up userspace */ 3063 if (works->event) 3064 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); 3065 3066 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3067 3068 drm_crtc_vblank_put(&amdgpu_crtc->base); 3069 schedule_work(&works->unpin_work); 3070 3071 return 0; 3072 } 3073 3074 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev, 3075 struct amdgpu_irq_src *source, 3076 struct amdgpu_iv_entry *entry) 3077 { 3078 uint32_t disp_int, mask, tmp; 3079 unsigned hpd; 3080 3081 if (entry->src_data[0] >= adev->mode_info.num_hpd) { 3082 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3083 return 0; 3084 } 3085 3086 hpd = entry->src_data[0]; 3087 disp_int = RREG32(interrupt_status_offsets[hpd].reg); 3088 mask = interrupt_status_offsets[hpd].hpd; 3089 3090 if (disp_int & mask) { 3091 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 3092 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3093 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 3094 schedule_work(&adev->hotplug_work); 3095 DRM_INFO("IH: HPD%d\n", hpd + 1); 3096 } 3097 3098 return 0; 3099 3100 } 3101 3102 static int dce_v6_0_set_clockgating_state(void *handle, 3103 enum amd_clockgating_state state) 3104 { 3105 return 0; 3106 } 3107 3108 static int dce_v6_0_set_powergating_state(void *handle, 3109 enum amd_powergating_state state) 3110 { 3111 return 0; 3112 } 3113 3114 static const struct amd_ip_funcs dce_v6_0_ip_funcs = { 3115 .name = "dce_v6_0", 3116 .early_init = dce_v6_0_early_init, 3117 .late_init = NULL, 3118 .sw_init = dce_v6_0_sw_init, 3119 .sw_fini = dce_v6_0_sw_fini, 3120 .hw_init = dce_v6_0_hw_init, 3121 .hw_fini = dce_v6_0_hw_fini, 3122 .suspend = dce_v6_0_suspend, 3123 .resume = dce_v6_0_resume, 3124 .is_idle = dce_v6_0_is_idle, 3125 .wait_for_idle = dce_v6_0_wait_for_idle, 3126 .soft_reset = dce_v6_0_soft_reset, 3127 .set_clockgating_state = dce_v6_0_set_clockgating_state, 3128 .set_powergating_state = dce_v6_0_set_powergating_state, 3129 }; 3130 3131 static void 3132 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder, 3133 struct drm_display_mode *mode, 3134 struct drm_display_mode *adjusted_mode) 3135 { 3136 3137 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3138 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 3139 3140 amdgpu_encoder->pixel_clock = adjusted_mode->clock; 3141 3142 /* need to call this here rather than in prepare() since we need some crtc info */ 3143 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3144 3145 /* set scaler clears this on some chips */ 3146 dce_v6_0_set_interleave(encoder->crtc, mode); 3147 3148 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) { 3149 dce_v6_0_afmt_enable(encoder, true); 3150 dce_v6_0_afmt_setmode(encoder, adjusted_mode); 3151 } 3152 } 3153 3154 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder) 3155 { 3156 3157 struct amdgpu_device *adev = encoder->dev->dev_private; 3158 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3159 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3160 3161 if ((amdgpu_encoder->active_device & 3162 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3163 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3164 ENCODER_OBJECT_ID_NONE)) { 3165 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3166 if (dig) { 3167 dig->dig_encoder =
dce_v6_0_pick_dig_encoder(encoder); 3168 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) 3169 dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3170 } 3171 } 3172 3173 amdgpu_atombios_scratch_regs_lock(adev, true); 3174 3175 if (connector) { 3176 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3177 3178 /* select the clock/data port if it uses a router */ 3179 if (amdgpu_connector->router.cd_valid) 3180 amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3181 3182 /* turn eDP panel on for mode set */ 3183 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3184 amdgpu_atombios_encoder_set_edp_panel_power(connector, 3185 ATOM_TRANSMITTER_ACTION_POWER_ON); 3186 } 3187 3188 /* this is needed for the pll/ss setup to work correctly in some cases */ 3189 amdgpu_atombios_encoder_set_crtc_source(encoder); 3190 /* set up the FMT blocks */ 3191 dce_v6_0_program_fmt(encoder); 3192 } 3193 3194 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) 3195 { 3196 3197 struct drm_device *dev = encoder->dev; 3198 struct amdgpu_device *adev = dev->dev_private; 3199 3200 /* need to call this here as we need the crtc set up */ 3201 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3202 amdgpu_atombios_scratch_regs_lock(adev, false); 3203 } 3204 3205 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder) 3206 { 3207 3208 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3209 struct amdgpu_encoder_atom_dig *dig; 3210 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 3211 3212 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3213 3214 if (amdgpu_atombios_encoder_is_digital(encoder)) { 3215 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) 3216 dce_v6_0_afmt_enable(encoder, false); 3217 dig = amdgpu_encoder->enc_priv; 3218 dig->dig_encoder = -1; 3219 } 3220 amdgpu_encoder->active_device = 0; 3221 } 3222 3223 /* these are handled by the primary encoders */ 3224 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder) 3225 { 3226 3227 } 3228 3229 static void dce_v6_0_ext_commit(struct drm_encoder *encoder) 3230 { 3231 3232 } 3233 3234 static void 3235 dce_v6_0_ext_mode_set(struct drm_encoder *encoder, 3236 struct drm_display_mode *mode, 3237 struct drm_display_mode *adjusted_mode) 3238 { 3239 3240 } 3241 3242 static void dce_v6_0_ext_disable(struct drm_encoder *encoder) 3243 { 3244 3245 } 3246 3247 static void 3248 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode) 3249 { 3250 3251 } 3252 3253 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder, 3254 const struct drm_display_mode *mode, 3255 struct drm_display_mode *adjusted_mode) 3256 { 3257 return true; 3258 } 3259 3260 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = { 3261 .dpms = dce_v6_0_ext_dpms, 3262 .mode_fixup = dce_v6_0_ext_mode_fixup, 3263 .prepare = dce_v6_0_ext_prepare, 3264 .mode_set = dce_v6_0_ext_mode_set, 3265 .commit = dce_v6_0_ext_commit, 3266 .disable = dce_v6_0_ext_disable, 3267 /* no detect for TMDS/LVDS yet */ 3268 }; 3269 3270 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = { 3271 .dpms = amdgpu_atombios_encoder_dpms, 3272 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3273 .prepare = dce_v6_0_encoder_prepare, 3274 .mode_set = dce_v6_0_encoder_mode_set, 3275 .commit = dce_v6_0_encoder_commit, 3276 .disable = dce_v6_0_encoder_disable, 3277 .detect = amdgpu_atombios_encoder_dig_detect, 3278 }; 3279 3280 static const struct drm_encoder_helper_funcs 
dce_v6_0_dac_helper_funcs = { 3281 .dpms = amdgpu_atombios_encoder_dpms, 3282 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3283 .prepare = dce_v6_0_encoder_prepare, 3284 .mode_set = dce_v6_0_encoder_mode_set, 3285 .commit = dce_v6_0_encoder_commit, 3286 .detect = amdgpu_atombios_encoder_dac_detect, 3287 }; 3288 3289 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder) 3290 { 3291 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3292 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3293 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); 3294 kfree(amdgpu_encoder->enc_priv); 3295 drm_encoder_cleanup(encoder); 3296 kfree(amdgpu_encoder); 3297 } 3298 3299 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = { 3300 .destroy = dce_v6_0_encoder_destroy, 3301 }; 3302 3303 static void dce_v6_0_encoder_add(struct amdgpu_device *adev, 3304 uint32_t encoder_enum, 3305 uint32_t supported_device, 3306 u16 caps) 3307 { 3308 struct drm_device *dev = adev->ddev; 3309 struct drm_encoder *encoder; 3310 struct amdgpu_encoder *amdgpu_encoder; 3311 3312 /* see if we already added it */ 3313 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3314 amdgpu_encoder = to_amdgpu_encoder(encoder); 3315 if (amdgpu_encoder->encoder_enum == encoder_enum) { 3316 amdgpu_encoder->devices |= supported_device; 3317 return; 3318 } 3319 3320 } 3321 3322 /* add a new one */ 3323 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); 3324 if (!amdgpu_encoder) 3325 return; 3326 3327 encoder = &amdgpu_encoder->base; 3328 switch (adev->mode_info.num_crtc) { 3329 case 1: 3330 encoder->possible_crtcs = 0x1; 3331 break; 3332 case 2: 3333 default: 3334 encoder->possible_crtcs = 0x3; 3335 break; 3336 case 4: 3337 encoder->possible_crtcs = 0xf; 3338 break; 3339 case 6: 3340 encoder->possible_crtcs = 0x3f; 3341 break; 3342 } 3343 3344 amdgpu_encoder->enc_priv = NULL; 3345 amdgpu_encoder->encoder_enum = encoder_enum; 3346 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 3347 amdgpu_encoder->devices = supported_device; 3348 amdgpu_encoder->rmx_type = RMX_OFF; 3349 amdgpu_encoder->underscan_type = UNDERSCAN_OFF; 3350 amdgpu_encoder->is_ext_encoder = false; 3351 amdgpu_encoder->caps = caps; 3352 3353 switch (amdgpu_encoder->encoder_id) { 3354 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3355 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3356 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3357 DRM_MODE_ENCODER_DAC, NULL); 3358 drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs); 3359 break; 3360 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3361 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 3362 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 3363 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 3364 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 3365 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3366 amdgpu_encoder->rmx_type = RMX_FULL; 3367 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3368 DRM_MODE_ENCODER_LVDS, NULL); 3369 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3370 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3371 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3372 DRM_MODE_ENCODER_DAC, NULL); 3373 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3374 } else { 3375 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3376 DRM_MODE_ENCODER_TMDS, NULL); 3377 amdgpu_encoder->enc_priv = 
amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3378 } 3379 drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs); 3380 break; 3381 case ENCODER_OBJECT_ID_SI170B: 3382 case ENCODER_OBJECT_ID_CH7303: 3383 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: 3384 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: 3385 case ENCODER_OBJECT_ID_TITFP513: 3386 case ENCODER_OBJECT_ID_VT1623: 3387 case ENCODER_OBJECT_ID_HDMI_SI1930: 3388 case ENCODER_OBJECT_ID_TRAVIS: 3389 case ENCODER_OBJECT_ID_NUTMEG: 3390 /* these are handled by the primary encoders */ 3391 amdgpu_encoder->is_ext_encoder = true; 3392 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3393 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3394 DRM_MODE_ENCODER_LVDS, NULL); 3395 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3396 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3397 DRM_MODE_ENCODER_DAC, NULL); 3398 else 3399 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3400 DRM_MODE_ENCODER_TMDS, NULL); 3401 drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs); 3402 break; 3403 } 3404 } 3405 3406 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { 3407 .bandwidth_update = &dce_v6_0_bandwidth_update, 3408 .vblank_get_counter = &dce_v6_0_vblank_get_counter, 3409 .vblank_wait = &dce_v6_0_vblank_wait, 3410 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3411 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3412 .hpd_sense = &dce_v6_0_hpd_sense, 3413 .hpd_set_polarity = &dce_v6_0_hpd_set_polarity, 3414 .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg, 3415 .page_flip = &dce_v6_0_page_flip, 3416 .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos, 3417 .add_encoder = &dce_v6_0_encoder_add, 3418 .add_connector = &amdgpu_connector_add, 3419 }; 3420 3421 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) 3422 { 3423 if (adev->mode_info.funcs == NULL) 3424 adev->mode_info.funcs = &dce_v6_0_display_funcs; 3425 } 3426 3427 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = { 3428 .set = dce_v6_0_set_crtc_interrupt_state, 3429 .process = dce_v6_0_crtc_irq, 3430 }; 3431 3432 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = { 3433 .set = dce_v6_0_set_pageflip_interrupt_state, 3434 .process = dce_v6_0_pageflip_irq, 3435 }; 3436 3437 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = { 3438 .set = dce_v6_0_set_hpd_interrupt_state, 3439 .process = dce_v6_0_hpd_irq, 3440 }; 3441 3442 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev) 3443 { 3444 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; 3445 adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs; 3446 3447 adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; 3448 adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs; 3449 3450 adev->hpd_irq.num_types = AMDGPU_HPD_LAST; 3451 adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; 3452 } 3453 3454 const struct amdgpu_ip_block_version dce_v6_0_ip_block = 3455 { 3456 .type = AMD_IP_BLOCK_TYPE_DCE, 3457 .major = 6, 3458 .minor = 0, 3459 .rev = 0, 3460 .funcs = &dce_v6_0_ip_funcs, 3461 }; 3462 3463 const struct amdgpu_ip_block_version dce_v6_4_ip_block = 3464 { 3465 .type = AMD_IP_BLOCK_TYPE_DCE, 3466 .major = 6, 3467 .minor = 4, 3468 .rev = 0, 3469 .funcs = &dce_v6_0_ip_funcs, 3470 }; 3471
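/*
 * These version descriptors are what the asic-specific setup code
 * registers (via amdgpu_ip_block_add() in this kernel generation) so that
 * dce_v6_0_ip_funcs above is driven through the common IP block
 * init/suspend/resume state machine.
 */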