1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 #include <drm/drm_fourcc.h> 25 #include <drm/drm_vblank.h> 26 27 #include "amdgpu.h" 28 #include "amdgpu_pm.h" 29 #include "amdgpu_i2c.h" 30 #include "vid.h" 31 #include "atom.h" 32 #include "amdgpu_atombios.h" 33 #include "atombios_crtc.h" 34 #include "atombios_encoders.h" 35 #include "amdgpu_pll.h" 36 #include "amdgpu_connectors.h" 37 #include "amdgpu_display.h" 38 #include "dce_v10_0.h" 39 40 #include "dce/dce_10_0_d.h" 41 #include "dce/dce_10_0_sh_mask.h" 42 #include "dce/dce_10_0_enum.h" 43 #include "oss/oss_3_0_d.h" 44 #include "oss/oss_3_0_sh_mask.h" 45 #include "gmc/gmc_8_1_d.h" 46 #include "gmc/gmc_8_1_sh_mask.h" 47 48 #include "ivsrcid/ivsrcid_vislands30.h" 49 50 static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); 51 static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); 52 53 static const u32 crtc_offsets[] = 54 { 55 CRTC0_REGISTER_OFFSET, 56 CRTC1_REGISTER_OFFSET, 57 CRTC2_REGISTER_OFFSET, 58 CRTC3_REGISTER_OFFSET, 59 CRTC4_REGISTER_OFFSET, 60 CRTC5_REGISTER_OFFSET, 61 CRTC6_REGISTER_OFFSET 62 }; 63 64 static const u32 hpd_offsets[] = 65 { 66 HPD0_REGISTER_OFFSET, 67 HPD1_REGISTER_OFFSET, 68 HPD2_REGISTER_OFFSET, 69 HPD3_REGISTER_OFFSET, 70 HPD4_REGISTER_OFFSET, 71 HPD5_REGISTER_OFFSET 72 }; 73 74 static const uint32_t dig_offsets[] = { 75 DIG0_REGISTER_OFFSET, 76 DIG1_REGISTER_OFFSET, 77 DIG2_REGISTER_OFFSET, 78 DIG3_REGISTER_OFFSET, 79 DIG4_REGISTER_OFFSET, 80 DIG5_REGISTER_OFFSET, 81 DIG6_REGISTER_OFFSET 82 }; 83 84 static const struct { 85 uint32_t reg; 86 uint32_t vblank; 87 uint32_t vline; 88 uint32_t hpd; 89 90 } interrupt_status_offsets[] = { { 91 .reg = mmDISP_INTERRUPT_STATUS, 92 .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, 93 .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, 94 .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 95 }, { 96 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, 97 .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, 98 .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, 99 .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 100 }, { 101 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, 102 .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, 103 .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, 104 .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 105 
}, { 106 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, 107 .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, 108 .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, 109 .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 110 }, { 111 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, 112 .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, 113 .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, 114 .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 115 }, { 116 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, 117 .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, 118 .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, 119 .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 120 } }; 121 122 static const u32 golden_settings_tonga_a11[] = 123 { 124 mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 125 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, 126 mmFBC_MISC, 0x1f311fff, 0x12300000, 127 mmHDMI_CONTROL, 0x31000111, 0x00000011, 128 }; 129 130 static const u32 tonga_mgcg_cgcg_init[] = 131 { 132 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, 133 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, 134 }; 135 136 static const u32 golden_settings_fiji_a10[] = 137 { 138 mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 139 mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, 140 mmFBC_MISC, 0x1f311fff, 0x12300000, 141 mmHDMI_CONTROL, 0x31000111, 0x00000011, 142 }; 143 144 static const u32 fiji_mgcg_cgcg_init[] = 145 { 146 mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, 147 mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, 148 }; 149 150 static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) 151 { 152 switch (adev->asic_type) { 153 case CHIP_FIJI: 154 amdgpu_device_program_register_sequence(adev, 155 fiji_mgcg_cgcg_init, 156 ARRAY_SIZE(fiji_mgcg_cgcg_init)); 157 amdgpu_device_program_register_sequence(adev, 158 golden_settings_fiji_a10, 159 ARRAY_SIZE(golden_settings_fiji_a10)); 160 break; 161 case CHIP_TONGA: 162 amdgpu_device_program_register_sequence(adev, 163 tonga_mgcg_cgcg_init, 164 ARRAY_SIZE(tonga_mgcg_cgcg_init)); 165 amdgpu_device_program_register_sequence(adev, 166 golden_settings_tonga_a11, 167 ARRAY_SIZE(golden_settings_tonga_a11)); 168 break; 169 default: 170 break; 171 } 172 } 173 174 static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev, 175 u32 block_offset, u32 reg) 176 { 177 unsigned long flags; 178 u32 r; 179 180 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); 181 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 182 r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); 183 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 184 185 return r; 186 } 187 188 static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev, 189 u32 block_offset, u32 reg, u32 v) 190 { 191 unsigned long flags; 192 193 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); 194 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 195 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); 196 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 197 } 198 199 static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 200 { 201 if (crtc >= adev->mode_info.num_crtc) 202 return 0; 203 else 204 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 205 } 206 207 static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev) 208 { 209 unsigned i; 210 211 /* Enable pflip interrupts */ 212 for (i = 0; i 
< adev->mode_info.num_crtc; i++) 213 amdgpu_irq_get(adev, &adev->pageflip_irq, i); 214 } 215 216 static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) 217 { 218 unsigned i; 219 220 /* Disable pflip interrupts */ 221 for (i = 0; i < adev->mode_info.num_crtc; i++) 222 amdgpu_irq_put(adev, &adev->pageflip_irq, i); 223 } 224 225 /** 226 * dce_v10_0_page_flip - pageflip callback. 227 * 228 * @adev: amdgpu_device pointer 229 * @crtc_id: crtc to cleanup pageflip on 230 * @crtc_base: new address of the crtc (GPU MC address) 231 * @async: asynchronous flip 232 * 233 * Triggers the actual pageflip by updating the primary 234 * surface base address. 235 */ 236 static void dce_v10_0_page_flip(struct amdgpu_device *adev, 237 int crtc_id, u64 crtc_base, bool async) 238 { 239 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 240 struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb; 241 u32 tmp; 242 243 /* flip at hsync for async, default is vsync */ 244 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 245 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 246 GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0); 247 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 248 /* update pitch */ 249 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, 250 fb->pitches[0] / fb->format->cpp[0]); 251 /* update the primary scanout address */ 252 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 253 upper_32_bits(crtc_base)); 254 /* writing to the low address triggers the update */ 255 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 256 lower_32_bits(crtc_base)); 257 /* post the write */ 258 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); 259 } 260 261 static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 262 u32 *vbl, u32 *position) 263 { 264 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 265 return -EINVAL; 266 267 *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); 268 *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 269 270 return 0; 271 } 272 273 /** 274 * dce_v10_0_hpd_sense - hpd sense callback. 275 * 276 * @adev: amdgpu_device pointer 277 * @hpd: hpd (hotplug detect) pin 278 * 279 * Checks if a digital monitor is connected (evergreen+). 280 * Returns true if connected, false if not connected. 281 */ 282 static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, 283 enum amdgpu_hpd_id hpd) 284 { 285 bool connected = false; 286 287 if (hpd >= adev->mode_info.num_hpd) 288 return connected; 289 290 if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & 291 DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) 292 connected = true; 293 294 return connected; 295 } 296 297 /** 298 * dce_v10_0_hpd_set_polarity - hpd set polarity callback. 299 * 300 * @adev: amdgpu_device pointer 301 * @hpd: hpd (hotplug detect) pin 302 * 303 * Set the polarity of the hpd pin (evergreen+). 304 */ 305 static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, 306 enum amdgpu_hpd_id hpd) 307 { 308 u32 tmp; 309 bool connected = dce_v10_0_hpd_sense(adev, hpd); 310 311 if (hpd >= adev->mode_info.num_hpd) 312 return; 313 314 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 315 if (connected) 316 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); 317 else 318 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); 319 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 320 } 321 322 /** 323 * dce_v10_0_hpd_init - hpd setup callback. 
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
			WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_CONNECT_INT_DELAY,
				    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
		tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
				    DC_HPD_DISCONNECT_INT_DELAY,
				    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
		WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}
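/*
 * Illustrative sketch (not part of the driver flow): the HPD programming
 * above uses the same read-modify-write pattern found throughout this file.
 * Assuming a valid pin index "hpd", enabling just that pin would look
 * roughly like:
 *
 *	u32 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[hpd]);
 *	tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
 *	WREG32(mmDC_HPD_CONTROL + hpd_offsets[hpd], tmp);
 *
 * REG_SET_FIELD() only modifies the named bitfield, so the other bits of
 * DC_HPD_CONTROL read back above are preserved.
 */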
/**
 * dce_v10_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
		WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq,
			       amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
		if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
					   bool render)
{
	u32 tmp;

	/* Lockout access through VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static int dce_v10_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
		num_crtc = 6;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

void dce_v10_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA rendering and any enabled CRTCs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v10_0_set_vga_render_state(adev, false);

		/* Disable the CRTCs */
		for (i = 0; i < dce_v10_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}
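/*
 * Usage sketch (illustrative only): dce_v10_0_disable_dce() is the exported
 * helper other code paths can call to quiesce the legacy VGA path and any
 * firmware-enabled CRTCs before a different display path takes over, e.g.:
 *
 *	dce_v10_0_disable_dce(adev);
 *
 * The function itself checks for a DCE engine via
 * amdgpu_atombios_has_dce_engine_info().  The CRTC_UPDATE_LOCK write pair
 * around the CRTC_MASTER_EN change keeps the double-buffered CRTC registers
 * from latching a half-updated state.
 */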
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
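/*
 * Worked example (illustrative): for an 8 bpc HDMI monitor with dithering
 * enabled, the switch above builds FMT_BIT_DEPTH_CONTROL roughly as:
 *
 *	tmp = 0;
 *	tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
 *	tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
 *	tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
 *	tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
 *	tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
 *
 * i.e. spatial dithering at depth 1 (8 bit), while a 6 bpc panel without
 * dithering would instead get FMT_TRUNCATE_EN=1 with FMT_TRUNCATE_DEPTH=0.
 */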

/* display watermark setup */
/**
 * dce_v10_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i, mem_cfg;
	u32 pipe_offset = amdgpu_crtc->crtc_id;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			mem_cfg = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			mem_cfg = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			mem_cfg = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		mem_cfg = 1;
		buffer_alloc = 0;
	}

	tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

	tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
	tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
		if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (mem_cfg) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
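/*
 * Worked example (illustrative): a 1920x1080 mode has crtc_hdisplay == 1920,
 * which falls into the "< 2560" bucket above, so mem_cfg = 2 and
 * buffer_alloc = 2.  The function then returns 2560 * 2 = 5120 pixels of
 * line buffer, and dce_v10_0_program_watermarks() later derives
 * lb_vblank_lead_lines = DIV_ROUND_UP(5120, 1920) = 3 from it.
 */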
/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v10_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
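/*
 * Worked example (illustrative numbers): with wm->yclk = 96000 and
 * wm->dram_channels = 4, dce_v10_0_dram_bandwidth() computes
 *
 *	yclk            = 96000 / 1000   = 96
 *	dram_channels   = 4 * 4          = 16
 *	dram_efficiency = 7 / 10         = 0.7
 *	bandwidth       = 16 * 96 * 0.7  = 1075 (MBytes/s, truncated)
 *
 * dce_v10_0_dram_bandwidth_for_display() is the same calculation except the
 * 0.7 efficiency factor is replaced by the worst-case 0.3 display
 * allocation, giving 16 * 96 * 0.3 = 460 MBytes/s for the same inputs.
 */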
/**
 * dce_v10_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
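/*
 * Informal summary of the three limits combined above:
 *
 *	available_bw = min(dram_bw, data_return_bw, dmif_request_bw)
 *	             = min(0.7 * (yclk / 1000) * 4 * dram_channels,
 *	                   0.8 * 32 * (sclk / 1000),
 *	                   0.8 * 32 * (disp_clk / 1000))
 *
 * i.e. whichever of memory, the return path, or the DMIF request path is
 * the bottleneck caps the bandwidth the display can borrow temporarily.
 */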
/**
 * dce_v10_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v10_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v10_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}
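/*
 * Informal summary of the calculation above:
 *
 *	latency = mc_latency
 *	        + (num_heads + 1) * worst_chunk_return_time
 *	        +  num_heads      * cursor_line_pair_return_time
 *	        + dc_latency
 *
 * and if the line buffer cannot be refilled within one active line
 * (line_fill_time >= active_time), the shortfall is added on top.  The
 * result feeds the URGENCY_LOW_WATERMARK fields programmed in
 * dce_v10_0_program_watermarks() below.
 */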
/**
 * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
{
	if (dce_v10_0_average_bandwidth(wm) <=
	    (dce_v10_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v10_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v10_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v10_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
1026 */ 1027 static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, 1028 struct amdgpu_crtc *amdgpu_crtc, 1029 u32 lb_size, u32 num_heads) 1030 { 1031 struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 1032 struct dce10_wm_params wm_low, wm_high; 1033 u32 active_time; 1034 u32 line_time = 0; 1035 u32 latency_watermark_a = 0, latency_watermark_b = 0; 1036 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1037 1038 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1039 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1040 (u32)mode->clock); 1041 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1042 (u32)mode->clock); 1043 line_time = min(line_time, (u32)65535); 1044 1045 /* watermark for high clocks */ 1046 if (adev->pm.dpm_enabled) { 1047 wm_high.yclk = 1048 amdgpu_dpm_get_mclk(adev, false) * 10; 1049 wm_high.sclk = 1050 amdgpu_dpm_get_sclk(adev, false) * 10; 1051 } else { 1052 wm_high.yclk = adev->pm.current_mclk * 10; 1053 wm_high.sclk = adev->pm.current_sclk * 10; 1054 } 1055 1056 wm_high.disp_clk = mode->clock; 1057 wm_high.src_width = mode->crtc_hdisplay; 1058 wm_high.active_time = active_time; 1059 wm_high.blank_time = line_time - wm_high.active_time; 1060 wm_high.interlaced = false; 1061 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1062 wm_high.interlaced = true; 1063 wm_high.vsc = amdgpu_crtc->vsc; 1064 wm_high.vtaps = 1; 1065 if (amdgpu_crtc->rmx_type != RMX_OFF) 1066 wm_high.vtaps = 2; 1067 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1068 wm_high.lb_size = lb_size; 1069 wm_high.dram_channels = cik_get_number_of_dram_channels(adev); 1070 wm_high.num_heads = num_heads; 1071 1072 /* set for high clocks */ 1073 latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535); 1074 1075 /* possibly force display priority to high */ 1076 /* should really do this at mode validation time... */ 1077 if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || 1078 !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) || 1079 !dce_v10_0_check_latency_hiding(&wm_high) || 1080 (adev->mode_info.disp_priority == 2)) { 1081 DRM_DEBUG_KMS("force priority to high\n"); 1082 } 1083 1084 /* watermark for low clocks */ 1085 if (adev->pm.dpm_enabled) { 1086 wm_low.yclk = 1087 amdgpu_dpm_get_mclk(adev, true) * 10; 1088 wm_low.sclk = 1089 amdgpu_dpm_get_sclk(adev, true) * 10; 1090 } else { 1091 wm_low.yclk = adev->pm.current_mclk * 10; 1092 wm_low.sclk = adev->pm.current_sclk * 10; 1093 } 1094 1095 wm_low.disp_clk = mode->clock; 1096 wm_low.src_width = mode->crtc_hdisplay; 1097 wm_low.active_time = active_time; 1098 wm_low.blank_time = line_time - wm_low.active_time; 1099 wm_low.interlaced = false; 1100 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1101 wm_low.interlaced = true; 1102 wm_low.vsc = amdgpu_crtc->vsc; 1103 wm_low.vtaps = 1; 1104 if (amdgpu_crtc->rmx_type != RMX_OFF) 1105 wm_low.vtaps = 2; 1106 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1107 wm_low.lb_size = lb_size; 1108 wm_low.dram_channels = cik_get_number_of_dram_channels(adev); 1109 wm_low.num_heads = num_heads; 1110 1111 /* set for low clocks */ 1112 latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535); 1113 1114 /* possibly force display priority to high */ 1115 /* should really do this at mode validation time... 
*/ 1116 if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || 1117 !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) || 1118 !dce_v10_0_check_latency_hiding(&wm_low) || 1119 (adev->mode_info.disp_priority == 2)) { 1120 DRM_DEBUG_KMS("force priority to high\n"); 1121 } 1122 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); 1123 } 1124 1125 /* select wm A */ 1126 wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); 1127 tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); 1128 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1129 tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); 1130 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); 1131 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); 1132 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1133 /* select wm B */ 1134 tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); 1135 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1136 tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); 1137 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); 1138 tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); 1139 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1140 /* restore original selection */ 1141 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); 1142 1143 /* save values for DPM */ 1144 amdgpu_crtc->line_time = line_time; 1145 amdgpu_crtc->wm_high = latency_watermark_a; 1146 amdgpu_crtc->wm_low = latency_watermark_b; 1147 /* Save number of lines the linebuffer leads before the scanout */ 1148 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; 1149 } 1150 1151 /** 1152 * dce_v10_0_bandwidth_update - program display watermarks 1153 * 1154 * @adev: amdgpu_device pointer 1155 * 1156 * Calculate and program the display watermarks and line 1157 * buffer allocation (CIK). 
1158 */ 1159 static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev) 1160 { 1161 struct drm_display_mode *mode = NULL; 1162 u32 num_heads = 0, lb_size; 1163 int i; 1164 1165 amdgpu_display_update_priority(adev); 1166 1167 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1168 if (adev->mode_info.crtcs[i]->base.enabled) 1169 num_heads++; 1170 } 1171 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1172 mode = &adev->mode_info.crtcs[i]->base.mode; 1173 lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); 1174 dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i], 1175 lb_size, num_heads); 1176 } 1177 } 1178 1179 static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev) 1180 { 1181 int i; 1182 u32 offset, tmp; 1183 1184 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1185 offset = adev->mode_info.audio.pin[i].offset; 1186 tmp = RREG32_AUDIO_ENDPT(offset, 1187 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); 1188 if (((tmp & 1189 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> 1190 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) 1191 adev->mode_info.audio.pin[i].connected = false; 1192 else 1193 adev->mode_info.audio.pin[i].connected = true; 1194 } 1195 } 1196 1197 static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev) 1198 { 1199 int i; 1200 1201 dce_v10_0_audio_get_connected_pins(adev); 1202 1203 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1204 if (adev->mode_info.audio.pin[i].connected) 1205 return &adev->mode_info.audio.pin[i]; 1206 } 1207 DRM_ERROR("No connected audio pins found!\n"); 1208 return NULL; 1209 } 1210 1211 static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) 1212 { 1213 struct amdgpu_device *adev = drm_to_adev(encoder->dev); 1214 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1215 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1216 u32 tmp; 1217 1218 if (!dig || !dig->afmt || !dig->afmt->pin) 1219 return; 1220 1221 tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); 1222 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); 1223 WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); 1224 } 1225 1226 static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, 1227 struct drm_display_mode *mode) 1228 { 1229 struct drm_device *dev = encoder->dev; 1230 struct amdgpu_device *adev = drm_to_adev(dev); 1231 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1232 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1233 struct drm_connector *connector; 1234 struct drm_connector_list_iter iter; 1235 struct amdgpu_connector *amdgpu_connector = NULL; 1236 u32 tmp; 1237 int interlace = 0; 1238 1239 if (!dig || !dig->afmt || !dig->afmt->pin) 1240 return; 1241 1242 drm_connector_list_iter_begin(dev, &iter); 1243 drm_for_each_connector_iter(connector, &iter) { 1244 if (connector->encoder == encoder) { 1245 amdgpu_connector = to_amdgpu_connector(connector); 1246 break; 1247 } 1248 } 1249 drm_connector_list_iter_end(&iter); 1250 1251 if (!amdgpu_connector) { 1252 DRM_ERROR("Couldn't find encoder's connector\n"); 1253 return; 1254 } 1255 1256 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1257 interlace = 1; 1258 if (connector->latency_present[interlace]) { 1259 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1260 
VIDEO_LIPSYNC, connector->video_latency[interlace]); 1261 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1262 AUDIO_LIPSYNC, connector->audio_latency[interlace]); 1263 } else { 1264 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1265 VIDEO_LIPSYNC, 0); 1266 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1267 AUDIO_LIPSYNC, 0); 1268 } 1269 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1270 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 1271 } 1272 1273 static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) 1274 { 1275 struct drm_device *dev = encoder->dev; 1276 struct amdgpu_device *adev = drm_to_adev(dev); 1277 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1278 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1279 struct drm_connector *connector; 1280 struct drm_connector_list_iter iter; 1281 struct amdgpu_connector *amdgpu_connector = NULL; 1282 u32 tmp; 1283 u8 *sadb = NULL; 1284 int sad_count; 1285 1286 if (!dig || !dig->afmt || !dig->afmt->pin) 1287 return; 1288 1289 drm_connector_list_iter_begin(dev, &iter); 1290 drm_for_each_connector_iter(connector, &iter) { 1291 if (connector->encoder == encoder) { 1292 amdgpu_connector = to_amdgpu_connector(connector); 1293 break; 1294 } 1295 } 1296 drm_connector_list_iter_end(&iter); 1297 1298 if (!amdgpu_connector) { 1299 DRM_ERROR("Couldn't find encoder's connector\n"); 1300 return; 1301 } 1302 1303 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); 1304 if (sad_count < 0) { 1305 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 1306 sad_count = 0; 1307 } 1308 1309 /* program the speaker allocation */ 1310 tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1311 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 1312 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1313 DP_CONNECTION, 0); 1314 /* set HDMI mode */ 1315 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1316 HDMI_CONNECTION, 1); 1317 if (sad_count) 1318 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1319 SPEAKER_ALLOCATION, sadb[0]); 1320 else 1321 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1322 SPEAKER_ALLOCATION, 5); /* stereo */ 1323 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1324 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 1325 1326 kfree(sadb); 1327 } 1328 1329 static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) 1330 { 1331 struct drm_device *dev = encoder->dev; 1332 struct amdgpu_device *adev = drm_to_adev(dev); 1333 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1334 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1335 struct drm_connector *connector; 1336 struct drm_connector_list_iter iter; 1337 struct amdgpu_connector *amdgpu_connector = NULL; 1338 struct cea_sad *sads; 1339 int i, sad_count; 1340 1341 static const u16 eld_reg_to_type[][2] = { 1342 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, 1343 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, 1344 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, 1345 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, 1346 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, 1347 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, 
HDMI_AUDIO_CODING_TYPE_AAC_LC }, 1348 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, 1349 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, 1350 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, 1351 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, 1352 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, 1353 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 1354 }; 1355 1356 if (!dig || !dig->afmt || !dig->afmt->pin) 1357 return; 1358 1359 drm_connector_list_iter_begin(dev, &iter); 1360 drm_for_each_connector_iter(connector, &iter) { 1361 if (connector->encoder == encoder) { 1362 amdgpu_connector = to_amdgpu_connector(connector); 1363 break; 1364 } 1365 } 1366 drm_connector_list_iter_end(&iter); 1367 1368 if (!amdgpu_connector) { 1369 DRM_ERROR("Couldn't find encoder's connector\n"); 1370 return; 1371 } 1372 1373 sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); 1374 if (sad_count < 0) 1375 DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 1376 if (sad_count <= 0) 1377 return; 1378 BUG_ON(!sads); 1379 1380 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 1381 u32 tmp = 0; 1382 u8 stereo_freqs = 0; 1383 int max_channels = -1; 1384 int j; 1385 1386 for (j = 0; j < sad_count; j++) { 1387 struct cea_sad *sad = &sads[j]; 1388 1389 if (sad->format == eld_reg_to_type[i][1]) { 1390 if (sad->channels > max_channels) { 1391 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1392 MAX_CHANNELS, sad->channels); 1393 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1394 DESCRIPTOR_BYTE_2, sad->byte2); 1395 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1396 SUPPORTED_FREQUENCIES, sad->freq); 1397 max_channels = sad->channels; 1398 } 1399 1400 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 1401 stereo_freqs |= sad->freq; 1402 else 1403 break; 1404 } 1405 } 1406 1407 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1408 SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); 1409 WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); 1410 } 1411 1412 kfree(sads); 1413 } 1414 1415 static void dce_v10_0_audio_enable(struct amdgpu_device *adev, 1416 struct amdgpu_audio_pin *pin, 1417 bool enable) 1418 { 1419 if (!pin) 1420 return; 1421 1422 WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 1423 enable ? 
AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); 1424 } 1425 1426 static const u32 pin_offsets[] = 1427 { 1428 AUD0_REGISTER_OFFSET, 1429 AUD1_REGISTER_OFFSET, 1430 AUD2_REGISTER_OFFSET, 1431 AUD3_REGISTER_OFFSET, 1432 AUD4_REGISTER_OFFSET, 1433 AUD5_REGISTER_OFFSET, 1434 AUD6_REGISTER_OFFSET, 1435 }; 1436 1437 static int dce_v10_0_audio_init(struct amdgpu_device *adev) 1438 { 1439 int i; 1440 1441 if (!amdgpu_audio) 1442 return 0; 1443 1444 adev->mode_info.audio.enabled = true; 1445 1446 adev->mode_info.audio.num_pins = 7; 1447 1448 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1449 adev->mode_info.audio.pin[i].channels = -1; 1450 adev->mode_info.audio.pin[i].rate = -1; 1451 adev->mode_info.audio.pin[i].bits_per_sample = -1; 1452 adev->mode_info.audio.pin[i].status_bits = 0; 1453 adev->mode_info.audio.pin[i].category_code = 0; 1454 adev->mode_info.audio.pin[i].connected = false; 1455 adev->mode_info.audio.pin[i].offset = pin_offsets[i]; 1456 adev->mode_info.audio.pin[i].id = i; 1457 /* disable audio. it will be set up later */ 1458 /* XXX remove once we switch to ip funcs */ 1459 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 1460 } 1461 1462 return 0; 1463 } 1464 1465 static void dce_v10_0_audio_fini(struct amdgpu_device *adev) 1466 { 1467 int i; 1468 1469 if (!amdgpu_audio) 1470 return; 1471 1472 if (!adev->mode_info.audio.enabled) 1473 return; 1474 1475 for (i = 0; i < adev->mode_info.audio.num_pins; i++) 1476 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 1477 1478 adev->mode_info.audio.enabled = false; 1479 } 1480 1481 /* 1482 * update the N and CTS parameters for a given pixel clock rate 1483 */ 1484 static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) 1485 { 1486 struct drm_device *dev = encoder->dev; 1487 struct amdgpu_device *adev = drm_to_adev(dev); 1488 struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); 1489 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1490 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1491 u32 tmp; 1492 1493 tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); 1494 tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); 1495 WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); 1496 tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); 1497 tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); 1498 WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); 1499 1500 tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); 1501 tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); 1502 WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); 1503 tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); 1504 tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); 1505 WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); 1506 1507 tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); 1508 tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); 1509 WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); 1510 tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); 1511 tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); 1512 WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); 1513 1514 } 1515 1516 /* 1517 * build a HDMI Video Info Frame 1518 */ 1519 static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, 1520 void *buffer, size_t size) 1521 { 1522 struct drm_device *dev = encoder->dev; 1523 struct amdgpu_device *adev = drm_to_adev(dev); 1524 struct 
amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;
	u32 tmp;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers). DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
			    amdgpu_crtc->crtc_id);
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
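/*
 * Worked example (illustrative): for a 148.5 MHz pixel clock (1080p60),
 * clock == 148500, so the DTO is programmed as
 *
 *	DCCG_AUDIO_DTO0_PHASE  = 24 * 1000 = 24000
 *	DCCG_AUDIO_DTO0_MODULE = 148500
 *
 * which encodes the ratio 24 MHz / 148.5 MHz used to derive the 24 MHz
 * audio reference from the selected CRTC's pixel clock.
 */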
/*
 * update the info frames with the data from the current display mode
 */
static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
				   struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;
	u32 tmp;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v10_0_audio_get_pin(adev);
	dce_v10_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v10_0_audio_set_dto(encoder, mode->clock);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}
	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable audio info frames (frames won't be set until audio is enabled) */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* set the default audio delay */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	/* should be sufficient for all audio modes and small enough for all hblanks */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* allow 60958 channel status fields to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	if (bpc > 8)
		/* clear SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
	else
		/* select SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
	/* allow hw to send ACR packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

1683 dce_v10_0_afmt_update_ACR(encoder, mode->clock); 1684 1685 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); 1686 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); 1687 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); 1688 1689 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); 1690 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); 1691 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); 1692 1693 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); 1694 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); 1695 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); 1696 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); 1697 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); 1698 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); 1699 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); 1700 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); 1701 1702 dce_v10_0_audio_write_speaker_allocation(encoder); 1703 1704 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, 1705 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); 1706 1707 dce_v10_0_afmt_audio_select_pin(encoder); 1708 dce_v10_0_audio_write_sad_regs(encoder); 1709 dce_v10_0_audio_write_latency_fields(encoder, mode); 1710 1711 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 1712 if (err < 0) { 1713 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1714 return; 1715 } 1716 1717 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); 1718 if (err < 0) { 1719 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); 1720 return; 1721 } 1722 1723 dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 1724 1725 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1726 /* enable AVI info frames */ 1727 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); 1728 /* required for audio info values to be updated */ 1729 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); 1730 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1731 1732 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1733 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); 1734 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1735 1736 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1737 /* send audio packets */ 1738 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); 1739 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1740 1741 WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); 1742 WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); 1743 WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); 1744 WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); 1745 1746 /* enable audio after setting up hw */ 1747 dce_v10_0_audio_enable(adev, dig->afmt->pin, true); 1748 } 1749 1750 static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1751 { 1752 struct drm_device *dev = encoder->dev; 1753 struct amdgpu_device *adev = drm_to_adev(dev); 1754 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1755 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1756 1757 if (!dig || !dig->afmt) 1758 return; 1759 1760 /* Silent, r600_hdmi_enable will raise WARN for us */ 1761 if (enable &&
dig->afmt->enabled) 1762 return; 1763 if (!enable && !dig->afmt->enabled) 1764 return; 1765 1766 if (!enable && dig->afmt->pin) { 1767 dce_v10_0_audio_enable(adev, dig->afmt->pin, false); 1768 dig->afmt->pin = NULL; 1769 } 1770 1771 dig->afmt->enabled = enable; 1772 1773 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1774 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1775 } 1776 1777 static int dce_v10_0_afmt_init(struct amdgpu_device *adev) 1778 { 1779 int i; 1780 1781 for (i = 0; i < adev->mode_info.num_dig; i++) 1782 adev->mode_info.afmt[i] = NULL; 1783 1784 /* DCE10 has audio blocks tied to DIG encoders */ 1785 for (i = 0; i < adev->mode_info.num_dig; i++) { 1786 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1787 if (adev->mode_info.afmt[i]) { 1788 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1789 adev->mode_info.afmt[i]->id = i; 1790 } else { 1791 int j; 1792 for (j = 0; j < i; j++) { 1793 kfree(adev->mode_info.afmt[j]); 1794 adev->mode_info.afmt[j] = NULL; 1795 } 1796 return -ENOMEM; 1797 } 1798 } 1799 return 0; 1800 } 1801 1802 static void dce_v10_0_afmt_fini(struct amdgpu_device *adev) 1803 { 1804 int i; 1805 1806 for (i = 0; i < adev->mode_info.num_dig; i++) { 1807 kfree(adev->mode_info.afmt[i]); 1808 adev->mode_info.afmt[i] = NULL; 1809 } 1810 } 1811 1812 static const u32 vga_control_regs[6] = 1813 { 1814 mmD1VGA_CONTROL, 1815 mmD2VGA_CONTROL, 1816 mmD3VGA_CONTROL, 1817 mmD4VGA_CONTROL, 1818 mmD5VGA_CONTROL, 1819 mmD6VGA_CONTROL, 1820 }; 1821 1822 static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable) 1823 { 1824 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1825 struct drm_device *dev = crtc->dev; 1826 struct amdgpu_device *adev = drm_to_adev(dev); 1827 u32 vga_control; 1828 1829 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1830 if (enable) 1831 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 1832 else 1833 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 1834 } 1835 1836 static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) 1837 { 1838 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1839 struct drm_device *dev = crtc->dev; 1840 struct amdgpu_device *adev = drm_to_adev(dev); 1841 1842 if (enable) 1843 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 1844 else 1845 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 1846 } 1847 1848 static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, 1849 struct drm_framebuffer *fb, 1850 int x, int y, int atomic) 1851 { 1852 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1853 struct drm_device *dev = crtc->dev; 1854 struct amdgpu_device *adev = drm_to_adev(dev); 1855 struct drm_framebuffer *target_fb; 1856 struct drm_gem_object *obj; 1857 struct amdgpu_bo *abo; 1858 uint64_t fb_location, tiling_flags; 1859 uint32_t fb_format, fb_pitch_pixels; 1860 u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); 1861 u32 pipe_config; 1862 u32 tmp, viewport_w, viewport_h; 1863 int r; 1864 bool bypass_lut = false; 1865 1866 /* no fb bound */ 1867 if (!atomic && !crtc->primary->fb) { 1868 DRM_DEBUG_KMS("No FB bound\n"); 1869 return 0; 1870 } 1871 1872 if (atomic) 1873 target_fb = fb; 1874 else 1875 target_fb = crtc->primary->fb; 1876 1877 /* If atomic, assume fb object is pinned & idle & fenced and 1878 * just update base pointers 1879 */ 1880 obj = target_fb->obj[0]; 1881 abo = gem_to_amdgpu_bo(obj); 1882 r = amdgpu_bo_reserve(abo, false); 
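/*
 * The BO has to stay reserved while it is pinned and its tiling flags are
 * queried below; on the non-atomic path the new framebuffer is pinned into
 * VRAM here so the scanout address read back stays valid until the next
 * base update.
 */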
1883 if (unlikely(r != 0)) 1884 return r; 1885 1886 if (!atomic) { 1887 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); 1888 if (unlikely(r != 0)) { 1889 amdgpu_bo_unreserve(abo); 1890 return -EINVAL; 1891 } 1892 } 1893 fb_location = amdgpu_bo_gpu_offset(abo); 1894 1895 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1896 amdgpu_bo_unreserve(abo); 1897 1898 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1899 1900 switch (target_fb->format->format) { 1901 case DRM_FORMAT_C8: 1902 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); 1903 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1904 break; 1905 case DRM_FORMAT_XRGB4444: 1906 case DRM_FORMAT_ARGB4444: 1907 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1908 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); 1909 #ifdef __BIG_ENDIAN 1910 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1911 ENDIAN_8IN16); 1912 #endif 1913 break; 1914 case DRM_FORMAT_XRGB1555: 1915 case DRM_FORMAT_ARGB1555: 1916 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1917 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1918 #ifdef __BIG_ENDIAN 1919 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1920 ENDIAN_8IN16); 1921 #endif 1922 break; 1923 case DRM_FORMAT_BGRX5551: 1924 case DRM_FORMAT_BGRA5551: 1925 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1926 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); 1927 #ifdef __BIG_ENDIAN 1928 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1929 ENDIAN_8IN16); 1930 #endif 1931 break; 1932 case DRM_FORMAT_RGB565: 1933 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1934 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 1935 #ifdef __BIG_ENDIAN 1936 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1937 ENDIAN_8IN16); 1938 #endif 1939 break; 1940 case DRM_FORMAT_XRGB8888: 1941 case DRM_FORMAT_ARGB8888: 1942 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 1943 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1944 #ifdef __BIG_ENDIAN 1945 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1946 ENDIAN_8IN32); 1947 #endif 1948 break; 1949 case DRM_FORMAT_XRGB2101010: 1950 case DRM_FORMAT_ARGB2101010: 1951 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 1952 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 1953 #ifdef __BIG_ENDIAN 1954 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1955 ENDIAN_8IN32); 1956 #endif 1957 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1958 bypass_lut = true; 1959 break; 1960 case DRM_FORMAT_BGRX1010102: 1961 case DRM_FORMAT_BGRA1010102: 1962 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 1963 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); 1964 #ifdef __BIG_ENDIAN 1965 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1966 ENDIAN_8IN32); 1967 #endif 1968 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1969 bypass_lut = true; 1970 break; 1971 case DRM_FORMAT_XBGR8888: 1972 case DRM_FORMAT_ABGR8888: 1973 fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 1974 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1975 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2); 1976 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2); 1977 
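/*
 * For the BGR-ordered formats the red/blue crossbars above swap the two
 * channels on scanout, so the same GRPH_DEPTH/GRPH_FORMAT encoding used
 * for the ARGB variants can be reused with correct colors.
 */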
#ifdef __BIG_ENDIAN 1978 fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1979 ENDIAN_8IN32); 1980 #endif 1981 break; 1982 default: 1983 DRM_ERROR("Unsupported screen format %p4cc\n", 1984 &target_fb->format->format); 1985 return -EINVAL; 1986 } 1987 1988 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 1989 unsigned bankw, bankh, mtaspect, tile_split, num_banks; 1990 1991 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 1992 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 1993 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 1994 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 1995 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 1996 1997 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); 1998 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 1999 ARRAY_2D_TILED_THIN1); 2000 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, 2001 tile_split); 2002 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); 2003 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); 2004 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, 2005 mtaspect); 2006 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, 2007 ADDR_SURF_MICRO_TILING_DISPLAY); 2008 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 2009 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2010 ARRAY_1D_TILED_THIN1); 2011 } 2012 2013 fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, 2014 pipe_config); 2015 2016 dce_v10_0_vga_enable(crtc, false); 2017 2018 /* Make sure surface address is updated at vertical blank rather than 2019 * horizontal blank 2020 */ 2021 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 2022 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 2023 GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); 2024 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2025 2026 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2027 upper_32_bits(fb_location)); 2028 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2029 upper_32_bits(fb_location)); 2030 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2031 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 2032 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2033 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); 2034 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2035 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 2036 2037 /* 2038 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 2039 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2040 * retain the full precision throughout the pipeline. 
2041 */ 2042 tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); 2043 if (bypass_lut) 2044 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); 2045 else 2046 tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); 2047 WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); 2048 2049 if (bypass_lut) 2050 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2051 2052 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2053 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2054 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2055 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2056 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2057 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2058 2059 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; 2060 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2061 2062 dce_v10_0_grph_enable(crtc, true); 2063 2064 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2065 target_fb->height); 2066 2067 x &= ~3; 2068 y &= ~1; 2069 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2070 (x << 16) | y); 2071 viewport_w = crtc->mode.hdisplay; 2072 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2073 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2074 (viewport_w << 16) | viewport_h); 2075 2076 /* set pageflip to happen anywhere in vblank interval */ 2077 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 2078 2079 if (!atomic && fb && fb != crtc->primary->fb) { 2080 abo = gem_to_amdgpu_bo(fb->obj[0]); 2081 r = amdgpu_bo_reserve(abo, true); 2082 if (unlikely(r != 0)) 2083 return r; 2084 amdgpu_bo_unpin(abo); 2085 amdgpu_bo_unreserve(abo); 2086 } 2087 2088 /* Bytes per pixel may have changed */ 2089 dce_v10_0_bandwidth_update(adev); 2090 2091 return 0; 2092 } 2093 2094 static void dce_v10_0_set_interleave(struct drm_crtc *crtc, 2095 struct drm_display_mode *mode) 2096 { 2097 struct drm_device *dev = crtc->dev; 2098 struct amdgpu_device *adev = drm_to_adev(dev); 2099 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2100 u32 tmp; 2101 2102 tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); 2103 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2104 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); 2105 else 2106 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); 2107 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); 2108 } 2109 2110 static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) 2111 { 2112 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2113 struct drm_device *dev = crtc->dev; 2114 struct amdgpu_device *adev = drm_to_adev(dev); 2115 u16 *r, *g, *b; 2116 int i; 2117 u32 tmp; 2118 2119 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2120 2121 tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2122 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); 2123 tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0); 2124 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2125 2126 tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); 2127 tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); 2128 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2129 2130 tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset); 2131 tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1); 2132 
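/*
 * Like the graphics prescaler above, the overlay prescaler (a scale/offset
 * stage applied to incoming pixel values) is bypassed so the legacy gamma
 * handling below works on unmodified framebuffer data.
 */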
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2133 2134 tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2135 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); 2136 tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0); 2137 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2138 2139 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2140 2141 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2142 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2143 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2144 2145 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2146 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2147 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2148 2149 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2150 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2151 2152 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2153 r = crtc->gamma_store; 2154 g = r + crtc->gamma_size; 2155 b = g + crtc->gamma_size; 2156 for (i = 0; i < 256; i++) { 2157 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2158 ((*r++ & 0xffc0) << 14) | 2159 ((*g++ & 0xffc0) << 4) | 2160 (*b++ >> 6)); 2161 } 2162 2163 tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2164 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); 2165 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0); 2166 tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); 2167 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2168 2169 tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); 2170 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); 2171 tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0); 2172 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2173 2174 tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2175 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); 2176 tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0); 2177 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2178 2179 tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2180 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); 2181 tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0); 2182 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2183 2184 /* XXX match this to the depth of the crtc fmt block, move to modeset? 
*/ 2185 WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); 2186 /* XXX this only needs to be programmed once per crtc at startup, 2187 * not sure where the best place for it is 2188 */ 2189 tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); 2190 tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); 2191 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2192 } 2193 2194 static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder) 2195 { 2196 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2197 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2198 2199 switch (amdgpu_encoder->encoder_id) { 2200 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2201 if (dig->linkb) 2202 return 1; 2203 else 2204 return 0; 2205 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2206 if (dig->linkb) 2207 return 3; 2208 else 2209 return 2; 2210 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2211 if (dig->linkb) 2212 return 5; 2213 else 2214 return 4; 2215 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2216 return 6; 2217 default: 2218 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2219 return 0; 2220 } 2221 } 2222 2223 /** 2224 * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc. 2225 * 2226 * @crtc: drm crtc 2227 * 2228 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2229 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2230 * monitors a dedicated PPLL must be used. If a particular board has 2231 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2232 * as there is no need to program the PLL itself. If we are not able to 2233 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2234 * avoid messing up an existing monitor. 2235 * 2236 * Asic specific PLL information 2237 * 2238 * DCE 10.x 2239 * Tonga 2240 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2241 * CI 2242 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2243 * 2244 */ 2245 static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) 2246 { 2247 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2248 struct drm_device *dev = crtc->dev; 2249 struct amdgpu_device *adev = drm_to_adev(dev); 2250 u32 pll_in_use; 2251 int pll; 2252 2253 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2254 if (adev->clock.dp_extclk) 2255 /* skip PPLL programming if using ext clock */ 2256 return ATOM_PPLL_INVALID; 2257 else { 2258 /* use the same PPLL for all DP monitors */ 2259 pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2260 if (pll != ATOM_PPLL_INVALID) 2261 return pll; 2262 } 2263 } else { 2264 /* use the same PPLL for all monitors with the same clock */ 2265 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2266 if (pll != ATOM_PPLL_INVALID) 2267 return pll; 2268 } 2269 2270 /* DCE10 has PPLL0, PPLL1, and PPLL2 */ 2271 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2272 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2273 return ATOM_PPLL2; 2274 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2275 return ATOM_PPLL1; 2276 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2277 return ATOM_PPLL0; 2278 DRM_ERROR("unable to allocate a PPLL\n"); 2279 return ATOM_PPLL_INVALID; 2280 } 2281 2282 static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2283 { 2284 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2285 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2286 uint32_t cur_lock; 2287 2288 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 
2289 if (lock) 2290 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); 2291 else 2292 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); 2293 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2294 } 2295 2296 static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) 2297 { 2298 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2299 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2300 u32 tmp; 2301 2302 tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2303 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); 2304 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2305 } 2306 2307 static void dce_v10_0_show_cursor(struct drm_crtc *crtc) 2308 { 2309 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2310 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2311 u32 tmp; 2312 2313 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2314 upper_32_bits(amdgpu_crtc->cursor_addr)); 2315 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2316 lower_32_bits(amdgpu_crtc->cursor_addr)); 2317 2318 tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2319 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); 2320 tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); 2321 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2322 } 2323 2324 static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc, 2325 int x, int y) 2326 { 2327 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2328 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2329 int xorigin = 0, yorigin = 0; 2330 2331 amdgpu_crtc->cursor_x = x; 2332 amdgpu_crtc->cursor_y = y; 2333 2334 /* avivo cursor are offset into the total surface */ 2335 x += crtc->x; 2336 y += crtc->y; 2337 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2338 2339 if (x < 0) { 2340 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2341 x = 0; 2342 } 2343 if (y < 0) { 2344 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2345 y = 0; 2346 } 2347 2348 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2349 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2350 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2351 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2352 2353 return 0; 2354 } 2355 2356 static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc, 2357 int x, int y) 2358 { 2359 int ret; 2360 2361 dce_v10_0_lock_cursor(crtc, true); 2362 ret = dce_v10_0_cursor_move_locked(crtc, x, y); 2363 dce_v10_0_lock_cursor(crtc, false); 2364 2365 return ret; 2366 } 2367 2368 static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, 2369 struct drm_file *file_priv, 2370 uint32_t handle, 2371 uint32_t width, 2372 uint32_t height, 2373 int32_t hot_x, 2374 int32_t hot_y) 2375 { 2376 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2377 struct drm_gem_object *obj; 2378 struct amdgpu_bo *aobj; 2379 int ret; 2380 2381 if (!handle) { 2382 /* turn off cursor */ 2383 dce_v10_0_hide_cursor(crtc); 2384 obj = NULL; 2385 goto unpin; 2386 } 2387 2388 if ((width > amdgpu_crtc->max_cursor_width) || 2389 (height > amdgpu_crtc->max_cursor_height)) { 2390 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2391 return -EINVAL; 2392 } 2393 2394 obj = drm_gem_object_lookup(file_priv, handle); 2395 if (!obj) { 2396 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2397 return -ENOENT; 2398 } 2399 2400 aobj = gem_to_amdgpu_bo(obj); 2401 ret = 
amdgpu_bo_reserve(aobj, false); 2402 if (ret != 0) { 2403 drm_gem_object_put(obj); 2404 return ret; 2405 } 2406 2407 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 2408 amdgpu_bo_unreserve(aobj); 2409 if (ret) { 2410 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2411 drm_gem_object_put(obj); 2412 return ret; 2413 } 2414 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 2415 2416 dce_v10_0_lock_cursor(crtc, true); 2417 2418 if (width != amdgpu_crtc->cursor_width || 2419 height != amdgpu_crtc->cursor_height || 2420 hot_x != amdgpu_crtc->cursor_hot_x || 2421 hot_y != amdgpu_crtc->cursor_hot_y) { 2422 int x, y; 2423 2424 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2425 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2426 2427 dce_v10_0_cursor_move_locked(crtc, x, y); 2428 2429 amdgpu_crtc->cursor_width = width; 2430 amdgpu_crtc->cursor_height = height; 2431 amdgpu_crtc->cursor_hot_x = hot_x; 2432 amdgpu_crtc->cursor_hot_y = hot_y; 2433 } 2434 2435 dce_v10_0_show_cursor(crtc); 2436 dce_v10_0_lock_cursor(crtc, false); 2437 2438 unpin: 2439 if (amdgpu_crtc->cursor_bo) { 2440 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2441 ret = amdgpu_bo_reserve(aobj, true); 2442 if (likely(ret == 0)) { 2443 amdgpu_bo_unpin(aobj); 2444 amdgpu_bo_unreserve(aobj); 2445 } 2446 drm_gem_object_put(amdgpu_crtc->cursor_bo); 2447 } 2448 2449 amdgpu_crtc->cursor_bo = obj; 2450 return 0; 2451 } 2452 2453 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) 2454 { 2455 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2456 2457 if (amdgpu_crtc->cursor_bo) { 2458 dce_v10_0_lock_cursor(crtc, true); 2459 2460 dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2461 amdgpu_crtc->cursor_y); 2462 2463 dce_v10_0_show_cursor(crtc); 2464 2465 dce_v10_0_lock_cursor(crtc, false); 2466 } 2467 } 2468 2469 static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2470 u16 *blue, uint32_t size, 2471 struct drm_modeset_acquire_ctx *ctx) 2472 { 2473 dce_v10_0_crtc_load_lut(crtc); 2474 2475 return 0; 2476 } 2477 2478 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) 2479 { 2480 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2481 2482 drm_crtc_cleanup(crtc); 2483 kfree(amdgpu_crtc); 2484 } 2485 2486 static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { 2487 .cursor_set2 = dce_v10_0_crtc_cursor_set2, 2488 .cursor_move = dce_v10_0_crtc_cursor_move, 2489 .gamma_set = dce_v10_0_crtc_gamma_set, 2490 .set_config = amdgpu_display_crtc_set_config, 2491 .destroy = dce_v10_0_crtc_destroy, 2492 .page_flip_target = amdgpu_display_crtc_page_flip_target, 2493 .get_vblank_counter = amdgpu_get_vblank_counter_kms, 2494 .enable_vblank = amdgpu_enable_vblank_kms, 2495 .disable_vblank = amdgpu_disable_vblank_kms, 2496 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 2497 }; 2498 2499 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2500 { 2501 struct drm_device *dev = crtc->dev; 2502 struct amdgpu_device *adev = drm_to_adev(dev); 2503 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2504 unsigned type; 2505 2506 switch (mode) { 2507 case DRM_MODE_DPMS_ON: 2508 amdgpu_crtc->enabled = true; 2509 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2510 dce_v10_0_vga_enable(crtc, true); 2511 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2512 dce_v10_0_vga_enable(crtc, false); 2513 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2514 type = 
amdgpu_display_crtc_idx_to_irq_type(adev, 2515 amdgpu_crtc->crtc_id); 2516 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2517 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2518 drm_crtc_vblank_on(crtc); 2519 dce_v10_0_crtc_load_lut(crtc); 2520 break; 2521 case DRM_MODE_DPMS_STANDBY: 2522 case DRM_MODE_DPMS_SUSPEND: 2523 case DRM_MODE_DPMS_OFF: 2524 drm_crtc_vblank_off(crtc); 2525 if (amdgpu_crtc->enabled) { 2526 dce_v10_0_vga_enable(crtc, true); 2527 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2528 dce_v10_0_vga_enable(crtc, false); 2529 } 2530 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2531 amdgpu_crtc->enabled = false; 2532 break; 2533 } 2534 /* adjust pm to dpms */ 2535 amdgpu_pm_compute_clocks(adev); 2536 } 2537 2538 static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc) 2539 { 2540 /* disable crtc pair power gating before programming */ 2541 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2542 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2543 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2544 } 2545 2546 static void dce_v10_0_crtc_commit(struct drm_crtc *crtc) 2547 { 2548 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2549 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2550 } 2551 2552 static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) 2553 { 2554 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2555 struct drm_device *dev = crtc->dev; 2556 struct amdgpu_device *adev = drm_to_adev(dev); 2557 struct amdgpu_atom_ss ss; 2558 int i; 2559 2560 dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2561 if (crtc->primary->fb) { 2562 int r; 2563 struct amdgpu_bo *abo; 2564 2565 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); 2566 r = amdgpu_bo_reserve(abo, true); 2567 if (unlikely(r)) 2568 DRM_ERROR("failed to reserve abo before unpin\n"); 2569 else { 2570 amdgpu_bo_unpin(abo); 2571 amdgpu_bo_unreserve(abo); 2572 } 2573 } 2574 /* disable the GRPH */ 2575 dce_v10_0_grph_enable(crtc, false); 2576 2577 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2578 2579 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2580 if (adev->mode_info.crtcs[i] && 2581 adev->mode_info.crtcs[i]->enabled && 2582 i != amdgpu_crtc->crtc_id && 2583 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2584 /* one other crtc is using this pll don't turn 2585 * off the pll 2586 */ 2587 goto done; 2588 } 2589 } 2590 2591 switch (amdgpu_crtc->pll_id) { 2592 case ATOM_PPLL0: 2593 case ATOM_PPLL1: 2594 case ATOM_PPLL2: 2595 /* disable the ppll */ 2596 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2597 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2598 break; 2599 default: 2600 break; 2601 } 2602 done: 2603 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2604 amdgpu_crtc->adjusted_clock = 0; 2605 amdgpu_crtc->encoder = NULL; 2606 amdgpu_crtc->connector = NULL; 2607 } 2608 2609 static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc, 2610 struct drm_display_mode *mode, 2611 struct drm_display_mode *adjusted_mode, 2612 int x, int y, struct drm_framebuffer *old_fb) 2613 { 2614 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2615 2616 if (!amdgpu_crtc->adjusted_clock) 2617 return -EINVAL; 2618 2619 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2620 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2621 dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2622 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2623 amdgpu_atombios_crtc_scaler_setup(crtc); 2624 dce_v10_0_cursor_reset(crtc); 2625 /* update the hw version fpr dpm */ 
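/*
 * hw_mode is what the power-management code reads back (e.g. to estimate
 * vblank time for display clock decisions), so it has to reflect the
 * adjusted timing actually programmed, not the originally requested mode.
 */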
2626 amdgpu_crtc->hw_mode = *adjusted_mode; 2627 2628 return 0; 2629 } 2630 2631 static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc, 2632 const struct drm_display_mode *mode, 2633 struct drm_display_mode *adjusted_mode) 2634 { 2635 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2636 struct drm_device *dev = crtc->dev; 2637 struct drm_encoder *encoder; 2638 2639 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2640 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2641 if (encoder->crtc == crtc) { 2642 amdgpu_crtc->encoder = encoder; 2643 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2644 break; 2645 } 2646 } 2647 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2648 amdgpu_crtc->encoder = NULL; 2649 amdgpu_crtc->connector = NULL; 2650 return false; 2651 } 2652 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2653 return false; 2654 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2655 return false; 2656 /* pick pll */ 2657 amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc); 2658 /* if we can't get a PPLL for a non-DP encoder, fail */ 2659 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2660 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2661 return false; 2662 2663 return true; 2664 } 2665 2666 static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2667 struct drm_framebuffer *old_fb) 2668 { 2669 return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2670 } 2671 2672 static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2673 struct drm_framebuffer *fb, 2674 int x, int y, enum mode_set_atomic state) 2675 { 2676 return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1); 2677 } 2678 2679 static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { 2680 .dpms = dce_v10_0_crtc_dpms, 2681 .mode_fixup = dce_v10_0_crtc_mode_fixup, 2682 .mode_set = dce_v10_0_crtc_mode_set, 2683 .mode_set_base = dce_v10_0_crtc_set_base, 2684 .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic, 2685 .prepare = dce_v10_0_crtc_prepare, 2686 .commit = dce_v10_0_crtc_commit, 2687 .disable = dce_v10_0_crtc_disable, 2688 .get_scanout_position = amdgpu_crtc_get_scanout_position, 2689 }; 2690 2691 static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) 2692 { 2693 struct amdgpu_crtc *amdgpu_crtc; 2694 2695 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2696 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2697 if (amdgpu_crtc == NULL) 2698 return -ENOMEM; 2699 2700 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); 2701 2702 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2703 amdgpu_crtc->crtc_id = index; 2704 adev->mode_info.crtcs[index] = amdgpu_crtc; 2705 2706 amdgpu_crtc->max_cursor_width = 128; 2707 amdgpu_crtc->max_cursor_height = 128; 2708 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2709 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2710 2711 switch (amdgpu_crtc->crtc_id) { 2712 case 0: 2713 default: 2714 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; 2715 break; 2716 case 1: 2717 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; 2718 break; 2719 case 2: 2720 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; 2721 break; 2722 case 3: 2723 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; 2724 break; 2725 case 4: 2726 amdgpu_crtc->crtc_offset = 
CRTC4_REGISTER_OFFSET; 2727 break; 2728 case 5: 2729 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; 2730 break; 2731 } 2732 2733 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2734 amdgpu_crtc->adjusted_clock = 0; 2735 amdgpu_crtc->encoder = NULL; 2736 amdgpu_crtc->connector = NULL; 2737 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs); 2738 2739 return 0; 2740 } 2741 2742 static int dce_v10_0_early_init(void *handle) 2743 { 2744 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2745 2746 adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; 2747 adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; 2748 2749 dce_v10_0_set_display_funcs(adev); 2750 2751 adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev); 2752 2753 switch (adev->asic_type) { 2754 case CHIP_FIJI: 2755 case CHIP_TONGA: 2756 adev->mode_info.num_hpd = 6; 2757 adev->mode_info.num_dig = 7; 2758 break; 2759 default: 2760 /* FIXME: not supported yet */ 2761 return -EINVAL; 2762 } 2763 2764 dce_v10_0_set_irq_funcs(adev); 2765 2766 return 0; 2767 } 2768 2769 static int dce_v10_0_sw_init(void *handle) 2770 { 2771 int r, i; 2772 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2773 2774 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2775 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); 2776 if (r) 2777 return r; 2778 } 2779 2780 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { 2781 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2782 if (r) 2783 return r; 2784 } 2785 2786 /* HPD hotplug */ 2787 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 2788 if (r) 2789 return r; 2790 2791 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; 2792 2793 adev_to_drm(adev)->mode_config.async_page_flip = true; 2794 2795 adev_to_drm(adev)->mode_config.max_width = 16384; 2796 adev_to_drm(adev)->mode_config.max_height = 16384; 2797 2798 adev_to_drm(adev)->mode_config.preferred_depth = 24; 2799 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 2800 2801 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; 2802 2803 r = amdgpu_display_modeset_create_props(adev); 2804 if (r) 2805 return r; 2806 2807 adev_to_drm(adev)->mode_config.max_width = 16384; 2808 adev_to_drm(adev)->mode_config.max_height = 16384; 2809 2810 /* allocate crtcs */ 2811 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2812 r = dce_v10_0_crtc_init(adev, i); 2813 if (r) 2814 return r; 2815 } 2816 2817 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2818 amdgpu_display_print_display_setup(adev_to_drm(adev)); 2819 else 2820 return -EINVAL; 2821 2822 /* setup afmt */ 2823 r = dce_v10_0_afmt_init(adev); 2824 if (r) 2825 return r; 2826 2827 r = dce_v10_0_audio_init(adev); 2828 if (r) 2829 return r; 2830 2831 drm_kms_helper_poll_init(adev_to_drm(adev)); 2832 2833 adev->mode_info.mode_config_initialized = true; 2834 return 0; 2835 } 2836 2837 static int dce_v10_0_sw_fini(void *handle) 2838 { 2839 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2840 2841 kfree(adev->mode_info.bios_hardcoded_edid); 2842 2843 drm_kms_helper_poll_fini(adev_to_drm(adev)); 2844 2845 dce_v10_0_audio_fini(adev); 2846 2847 dce_v10_0_afmt_fini(adev); 2848 2849 drm_mode_config_cleanup(adev_to_drm(adev)); 2850 adev->mode_info.mode_config_initialized = false; 2851 2852 return 0; 2853 } 2854 2855 static int dce_v10_0_hw_init(void *handle) 2856 { 2857 int i; 2858 struct amdgpu_device *adev = (struct 
amdgpu_device *)handle; 2859 2860 dce_v10_0_init_golden_registers(adev); 2861 2862 /* disable vga render */ 2863 dce_v10_0_set_vga_render_state(adev, false); 2864 /* init dig PHYs, disp eng pll */ 2865 amdgpu_atombios_encoder_init_dig(adev); 2866 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2867 2868 /* initialize hpd */ 2869 dce_v10_0_hpd_init(adev); 2870 2871 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2872 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2873 } 2874 2875 dce_v10_0_pageflip_interrupt_init(adev); 2876 2877 return 0; 2878 } 2879 2880 static int dce_v10_0_hw_fini(void *handle) 2881 { 2882 int i; 2883 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2884 2885 dce_v10_0_hpd_fini(adev); 2886 2887 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2888 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2889 } 2890 2891 dce_v10_0_pageflip_interrupt_fini(adev); 2892 2893 return 0; 2894 } 2895 2896 static int dce_v10_0_suspend(void *handle) 2897 { 2898 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2899 int r; 2900 2901 r = amdgpu_display_suspend_helper(adev); 2902 if (r) 2903 return r; 2904 2905 adev->mode_info.bl_level = 2906 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); 2907 2908 return dce_v10_0_hw_fini(handle); 2909 } 2910 2911 static int dce_v10_0_resume(void *handle) 2912 { 2913 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2914 int ret; 2915 2916 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, 2917 adev->mode_info.bl_level); 2918 2919 ret = dce_v10_0_hw_init(handle); 2920 2921 /* turn on the BL */ 2922 if (adev->mode_info.bl_encoder) { 2923 u8 bl_level = amdgpu_display_backlight_get_level(adev, 2924 adev->mode_info.bl_encoder); 2925 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2926 bl_level); 2927 } 2928 if (ret) 2929 return ret; 2930 2931 return amdgpu_display_resume_helper(adev); 2932 } 2933 2934 static bool dce_v10_0_is_idle(void *handle) 2935 { 2936 return true; 2937 } 2938 2939 static int dce_v10_0_wait_for_idle(void *handle) 2940 { 2941 return 0; 2942 } 2943 2944 static bool dce_v10_0_check_soft_reset(void *handle) 2945 { 2946 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2947 2948 return dce_v10_0_is_display_hung(adev); 2949 } 2950 2951 static int dce_v10_0_soft_reset(void *handle) 2952 { 2953 u32 srbm_soft_reset = 0, tmp; 2954 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2955 2956 if (dce_v10_0_is_display_hung(adev)) 2957 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 2958 2959 if (srbm_soft_reset) { 2960 tmp = RREG32(mmSRBM_SOFT_RESET); 2961 tmp |= srbm_soft_reset; 2962 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 2963 WREG32(mmSRBM_SOFT_RESET, tmp); 2964 tmp = RREG32(mmSRBM_SOFT_RESET); 2965 2966 udelay(50); 2967 2968 tmp &= ~srbm_soft_reset; 2969 WREG32(mmSRBM_SOFT_RESET, tmp); 2970 tmp = RREG32(mmSRBM_SOFT_RESET); 2971 2972 /* Wait a little for things to settle down */ 2973 udelay(50); 2974 } 2975 return 0; 2976 } 2977 2978 static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 2979 int crtc, 2980 enum amdgpu_interrupt_state state) 2981 { 2982 u32 lb_interrupt_mask; 2983 2984 if (crtc >= adev->mode_info.num_crtc) { 2985 DRM_DEBUG("invalid crtc %d\n", crtc); 2986 return; 2987 } 2988 2989 switch (state) { 2990 case AMDGPU_IRQ_STATE_DISABLE: 2991 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 2992 
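/*
 * Vblank (and vline, below) enables live in the per-CRTC LB_INTERRUPT_MASK
 * register, so each case does a read-modify-write at that CRTC's register
 * offset rather than touching a global interrupt mask.
 */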
lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 2993 VBLANK_INTERRUPT_MASK, 0); 2994 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 2995 break; 2996 case AMDGPU_IRQ_STATE_ENABLE: 2997 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 2998 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 2999 VBLANK_INTERRUPT_MASK, 1); 3000 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3001 break; 3002 default: 3003 break; 3004 } 3005 } 3006 3007 static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 3008 int crtc, 3009 enum amdgpu_interrupt_state state) 3010 { 3011 u32 lb_interrupt_mask; 3012 3013 if (crtc >= adev->mode_info.num_crtc) { 3014 DRM_DEBUG("invalid crtc %d\n", crtc); 3015 return; 3016 } 3017 3018 switch (state) { 3019 case AMDGPU_IRQ_STATE_DISABLE: 3020 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3021 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3022 VLINE_INTERRUPT_MASK, 0); 3023 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3024 break; 3025 case AMDGPU_IRQ_STATE_ENABLE: 3026 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3027 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3028 VLINE_INTERRUPT_MASK, 1); 3029 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3030 break; 3031 default: 3032 break; 3033 } 3034 } 3035 3036 static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev, 3037 struct amdgpu_irq_src *source, 3038 unsigned hpd, 3039 enum amdgpu_interrupt_state state) 3040 { 3041 u32 tmp; 3042 3043 if (hpd >= adev->mode_info.num_hpd) { 3044 DRM_DEBUG("invalid hdp %d\n", hpd); 3045 return 0; 3046 } 3047 3048 switch (state) { 3049 case AMDGPU_IRQ_STATE_DISABLE: 3050 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3051 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); 3052 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3053 break; 3054 case AMDGPU_IRQ_STATE_ENABLE: 3055 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3056 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); 3057 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3058 break; 3059 default: 3060 break; 3061 } 3062 3063 return 0; 3064 } 3065 3066 static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev, 3067 struct amdgpu_irq_src *source, 3068 unsigned type, 3069 enum amdgpu_interrupt_state state) 3070 { 3071 switch (type) { 3072 case AMDGPU_CRTC_IRQ_VBLANK1: 3073 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3074 break; 3075 case AMDGPU_CRTC_IRQ_VBLANK2: 3076 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3077 break; 3078 case AMDGPU_CRTC_IRQ_VBLANK3: 3079 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3080 break; 3081 case AMDGPU_CRTC_IRQ_VBLANK4: 3082 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3083 break; 3084 case AMDGPU_CRTC_IRQ_VBLANK5: 3085 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3086 break; 3087 case AMDGPU_CRTC_IRQ_VBLANK6: 3088 dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3089 break; 3090 case AMDGPU_CRTC_IRQ_VLINE1: 3091 dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state); 3092 break; 3093 case AMDGPU_CRTC_IRQ_VLINE2: 3094 dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state); 3095 break; 3096 case AMDGPU_CRTC_IRQ_VLINE3: 3097 dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state); 3098 
break; 3099 case AMDGPU_CRTC_IRQ_VLINE4: 3100 dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state); 3101 break; 3102 case AMDGPU_CRTC_IRQ_VLINE5: 3103 dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state); 3104 break; 3105 case AMDGPU_CRTC_IRQ_VLINE6: 3106 dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state); 3107 break; 3108 default: 3109 break; 3110 } 3111 return 0; 3112 } 3113 3114 static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev, 3115 struct amdgpu_irq_src *src, 3116 unsigned type, 3117 enum amdgpu_interrupt_state state) 3118 { 3119 u32 reg; 3120 3121 if (type >= adev->mode_info.num_crtc) { 3122 DRM_ERROR("invalid pageflip crtc %d\n", type); 3123 return -EINVAL; 3124 } 3125 3126 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3127 if (state == AMDGPU_IRQ_STATE_DISABLE) 3128 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3129 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3130 else 3131 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3132 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3133 3134 return 0; 3135 } 3136 3137 static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, 3138 struct amdgpu_irq_src *source, 3139 struct amdgpu_iv_entry *entry) 3140 { 3141 unsigned long flags; 3142 unsigned crtc_id; 3143 struct amdgpu_crtc *amdgpu_crtc; 3144 struct amdgpu_flip_work *works; 3145 3146 crtc_id = (entry->src_id - 8) >> 1; 3147 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3148 3149 if (crtc_id >= adev->mode_info.num_crtc) { 3150 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3151 return -EINVAL; 3152 } 3153 3154 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3155 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3156 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3157 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3158 3159 /* IRQ could occur when in initial stage */ 3160 if (amdgpu_crtc == NULL) 3161 return 0; 3162 3163 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 3164 works = amdgpu_crtc->pflip_works; 3165 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { 3166 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3167 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3168 amdgpu_crtc->pflip_status, 3169 AMDGPU_FLIP_SUBMITTED); 3170 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 3171 return 0; 3172 } 3173 3174 /* page flip completed. 
clean up */ 3175 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3176 amdgpu_crtc->pflip_works = NULL; 3177 3178 /* wakeup usersapce */ 3179 if (works->event) 3180 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); 3181 3182 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 3183 3184 drm_crtc_vblank_put(&amdgpu_crtc->base); 3185 schedule_work(&works->unpin_work); 3186 3187 return 0; 3188 } 3189 3190 static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, 3191 int hpd) 3192 { 3193 u32 tmp; 3194 3195 if (hpd >= adev->mode_info.num_hpd) { 3196 DRM_DEBUG("invalid hdp %d\n", hpd); 3197 return; 3198 } 3199 3200 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3201 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); 3202 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3203 } 3204 3205 static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev, 3206 int crtc) 3207 { 3208 u32 tmp; 3209 3210 if (crtc >= adev->mode_info.num_crtc) { 3211 DRM_DEBUG("invalid crtc %d\n", crtc); 3212 return; 3213 } 3214 3215 tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); 3216 tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); 3217 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); 3218 } 3219 3220 static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev, 3221 int crtc) 3222 { 3223 u32 tmp; 3224 3225 if (crtc >= adev->mode_info.num_crtc) { 3226 DRM_DEBUG("invalid crtc %d\n", crtc); 3227 return; 3228 } 3229 3230 tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); 3231 tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); 3232 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); 3233 } 3234 3235 static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, 3236 struct amdgpu_irq_src *source, 3237 struct amdgpu_iv_entry *entry) 3238 { 3239 unsigned crtc = entry->src_id - 1; 3240 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3241 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc); 3242 3243 switch (entry->src_data[0]) { 3244 case 0: /* vblank */ 3245 if (disp_int & interrupt_status_offsets[crtc].vblank) 3246 dce_v10_0_crtc_vblank_int_ack(adev, crtc); 3247 else 3248 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3249 3250 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3251 drm_handle_vblank(adev_to_drm(adev), crtc); 3252 } 3253 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3254 3255 break; 3256 case 1: /* vline */ 3257 if (disp_int & interrupt_status_offsets[crtc].vline) 3258 dce_v10_0_crtc_vline_int_ack(adev, crtc); 3259 else 3260 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3261 3262 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3263 3264 break; 3265 default: 3266 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3267 break; 3268 } 3269 3270 return 0; 3271 } 3272 3273 static int dce_v10_0_hpd_irq(struct amdgpu_device *adev, 3274 struct amdgpu_irq_src *source, 3275 struct amdgpu_iv_entry *entry) 3276 { 3277 uint32_t disp_int, mask; 3278 unsigned hpd; 3279 3280 if (entry->src_data[0] >= adev->mode_info.num_hpd) { 3281 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3282 return 0; 3283 } 3284 3285 hpd = entry->src_data[0]; 3286 disp_int = RREG32(interrupt_status_offsets[hpd].reg); 3287 mask = interrupt_status_offsets[hpd].hpd; 3288 3289 if (disp_int & mask) { 3290 dce_v10_0_hpd_int_ack(adev, hpd); 3291 schedule_work(&adev->hotplug_work); 3292 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3293 } 3294 3295 return 0; 3296 } 3297 3298 static int 
dce_v10_0_set_clockgating_state(void *handle, 3299 enum amd_clockgating_state state) 3300 { 3301 return 0; 3302 } 3303 3304 static int dce_v10_0_set_powergating_state(void *handle, 3305 enum amd_powergating_state state) 3306 { 3307 return 0; 3308 } 3309 3310 static const struct amd_ip_funcs dce_v10_0_ip_funcs = { 3311 .name = "dce_v10_0", 3312 .early_init = dce_v10_0_early_init, 3313 .late_init = NULL, 3314 .sw_init = dce_v10_0_sw_init, 3315 .sw_fini = dce_v10_0_sw_fini, 3316 .hw_init = dce_v10_0_hw_init, 3317 .hw_fini = dce_v10_0_hw_fini, 3318 .suspend = dce_v10_0_suspend, 3319 .resume = dce_v10_0_resume, 3320 .is_idle = dce_v10_0_is_idle, 3321 .wait_for_idle = dce_v10_0_wait_for_idle, 3322 .check_soft_reset = dce_v10_0_check_soft_reset, 3323 .soft_reset = dce_v10_0_soft_reset, 3324 .set_clockgating_state = dce_v10_0_set_clockgating_state, 3325 .set_powergating_state = dce_v10_0_set_powergating_state, 3326 }; 3327 3328 static void 3329 dce_v10_0_encoder_mode_set(struct drm_encoder *encoder, 3330 struct drm_display_mode *mode, 3331 struct drm_display_mode *adjusted_mode) 3332 { 3333 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3334 3335 amdgpu_encoder->pixel_clock = adjusted_mode->clock; 3336 3337 /* need to call this here rather than in prepare() since we need some crtc info */ 3338 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3339 3340 /* set scaler clears this on some chips */ 3341 dce_v10_0_set_interleave(encoder->crtc, mode); 3342 3343 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 3344 dce_v10_0_afmt_enable(encoder, true); 3345 dce_v10_0_afmt_setmode(encoder, adjusted_mode); 3346 } 3347 } 3348 3349 static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder) 3350 { 3351 struct amdgpu_device *adev = drm_to_adev(encoder->dev); 3352 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3353 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3354 3355 if ((amdgpu_encoder->active_device & 3356 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3357 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3358 ENCODER_OBJECT_ID_NONE)) { 3359 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3360 if (dig) { 3361 dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder); 3362 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) 3363 dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3364 } 3365 } 3366 3367 amdgpu_atombios_scratch_regs_lock(adev, true); 3368 3369 if (connector) { 3370 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3371 3372 /* select the clock/data port if it uses a router */ 3373 if (amdgpu_connector->router.cd_valid) 3374 amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3375 3376 /* turn eDP panel on for mode set */ 3377 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3378 amdgpu_atombios_encoder_set_edp_panel_power(connector, 3379 ATOM_TRANSMITTER_ACTION_POWER_ON); 3380 } 3381 3382 /* this is needed for the pll/ss setup to work correctly in some cases */ 3383 amdgpu_atombios_encoder_set_crtc_source(encoder); 3384 /* set up the FMT blocks */ 3385 dce_v10_0_program_fmt(encoder); 3386 } 3387 3388 static void dce_v10_0_encoder_commit(struct drm_encoder *encoder) 3389 { 3390 struct drm_device *dev = encoder->dev; 3391 struct amdgpu_device *adev = drm_to_adev(dev); 3392 3393 /* need to call this here as we need the crtc set up */ 3394 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3395 
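/*
 * The unlock below releases the atombios scratch register lock taken in
 * dce_v10_0_encoder_prepare(), so prepare()/commit() bracket the whole
 * encoder mode-set sequence.
 */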
static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	/* need to call this here as we need the crtc set up */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	amdgpu_atombios_scratch_regs_lock(adev, false);
}

static void dce_v10_0_encoder_disable(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig;

	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (amdgpu_atombios_encoder_is_digital(encoder)) {
		if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
			dce_v10_0_afmt_enable(encoder, false);
		dig = amdgpu_encoder->enc_priv;
		dig->dig_encoder = -1;
	}
	amdgpu_encoder->active_device = 0;
}

/* these are handled by the primary encoders */
static void dce_v10_0_ext_prepare(struct drm_encoder *encoder)
{

}

static void dce_v10_0_ext_commit(struct drm_encoder *encoder)
{

}

static void
dce_v10_0_ext_mode_set(struct drm_encoder *encoder,
		       struct drm_display_mode *mode,
		       struct drm_display_mode *adjusted_mode)
{

}

static void dce_v10_0_ext_disable(struct drm_encoder *encoder)
{

}

static void
dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = {
	.dpms = dce_v10_0_ext_dpms,
	.prepare = dce_v10_0_ext_prepare,
	.mode_set = dce_v10_0_ext_mode_set,
	.commit = dce_v10_0_ext_commit,
	.disable = dce_v10_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};

static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v10_0_encoder_prepare,
	.mode_set = dce_v10_0_encoder_mode_set,
	.commit = dce_v10_0_encoder_commit,
	.disable = dce_v10_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};

static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v10_0_encoder_prepare,
	.mode_set = dce_v10_0_encoder_mode_set,
	.commit = dce_v10_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};

static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}

static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = {
	.destroy = dce_v10_0_encoder_destroy,
};
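/*
 * Register a DRM encoder for the given ATOM encoder enum.  If an encoder
 * with the same enum has already been added, only its supported-device mask
 * is extended; otherwise a new amdgpu_encoder is allocated and hooked up to
 * the helper funcs that match its object ID.
 */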
static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
				  uint32_t encoder_enum,
				  uint32_t supported_device,
				  u16 caps)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		if (amdgpu_encoder->encoder_enum == encoder_enum) {
			amdgpu_encoder->devices |= supported_device;
			return;
		}
	}

	/* add a new one */
	amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return;

	encoder = &amdgpu_encoder->base;
	switch (adev->mode_info.num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	amdgpu_encoder->enc_priv = NULL;

	amdgpu_encoder->encoder_enum = encoder_enum;
	amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	amdgpu_encoder->devices = supported_device;
	amdgpu_encoder->rmx_type = RMX_OFF;
	amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
	amdgpu_encoder->is_ext_encoder = false;
	amdgpu_encoder->caps = caps;

	switch (amdgpu_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
				 DRM_MODE_ENCODER_DAC, NULL);
		drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			amdgpu_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
		} else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		} else {
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
			amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
		}
		drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs);
		break;
	}
}
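/*
 * Dispatch table wiring the common amdgpu display callbacks to the DCE v10
 * implementations in this file (bandwidth, vblank counter, backlight, HPD
 * and page flip handling).
 */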
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
	.bandwidth_update = &dce_v10_0_bandwidth_update,
	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v10_0_hpd_sense,
	.hpd_set_polarity = &dce_v10_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg,
	.page_flip = &dce_v10_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v10_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v10_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
	.set = dce_v10_0_set_crtc_irq_state,
	.process = dce_v10_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = {
	.set = dce_v10_0_set_pageflip_irq_state,
	.process = dce_v10_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
	.set = dce_v10_0_set_hpd_irq_state,
	.process = dce_v10_0_hpd_irq,
};

static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v10_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v10_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 10,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v10_0_ip_funcs,
};