/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "dce_v8_0.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "gca/gfx_7_2_enum.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

static const uint32_t dig_offsets[] = {
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v8_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the primary scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	/* writing to the low address triggers the update */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(crtc_base));
	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v8_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v8_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v8_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
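 * Note: on eDP and LVDS connectors the hpd interrupt is intentionally left
 * disabled (see the connector loop below) to avoid breaking the aux channel
 * and to avoid interrupt storms during dpms.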
 */
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
			 * aux dp channel on imac; it helps (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v8_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[6];
	u32 i, j, tmp;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
			crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	u32 tmp;

	/* Lock out access through the VGA aperture */
	tmp = RREG32(mmVGA_HDP_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
	else
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
	WREG32(mmVGA_HDP_CONTROL, tmp);

	/* disable VGA render */
	tmp = RREG32(mmVGA_RENDER_CONTROL);
	if (render)
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
	else
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	WREG32(mmVGA_RENDER_CONTROL, tmp);
}

static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
{
	int num_crtc = 0;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		num_crtc = 6;
		break;
	case CHIP_KAVERI:
		num_crtc = 4;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		num_crtc = 2;
		break;
	default:
		num_crtc = 0;
	}
	return num_crtc;
}

void dce_v8_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v8_0_set_vga_render_state(adev, false);

		/* Disable the CRTCs */
		for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
			crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
						     CRTC_CONTROL, CRTC_MASTER_EN);
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				(2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}


/* display watermark setup */
/**
 * dce_v8_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Set up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 6 line buffers, one for each display controller.
	 * There are 3 partitions per LB. Select the number of partitions
	 * to enable based on the display width.  For display widths larger
	 * than 4096, you need to use 2 display controllers and combine
	 * them using the stereo blender.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (mode->crtc_hdisplay < 1920) {
			tmp = 1;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 2560) {
			tmp = 2;
			buffer_alloc = 2;
		} else if (mode->crtc_hdisplay < 4096) {
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		} else {
			DRM_DEBUG_KMS("Mode too big for LB!\n");
			tmp = 0;
			buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
		}
	} else {
		tmp = 1;
		buffer_alloc = 0;
	}

	WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
	       (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
	       (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 1:
			return 1920 * 2;
		case 2:
			return 2560 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * cik_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
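 * The MC_SHARED_CHMAP NOOFCHAN field is an encoded value rather than a raw
 * count; e.g. an encoding of 3 means 8 channels (see the switch below).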
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce8_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v8_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
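 * Roughly sclk (in MHz) * 32 bytes per cycle * 0.8 return efficiency.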
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
	u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v8_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
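	 * In effect: bandwidth = src_width * bytes_per_pixel * vsc / line_time,
	 * where line_time = (active_time + blank_time) converted to us,
	 * giving MBytes/s.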
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v8_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);

}

/**
 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
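 * (the check is: average bandwidth <= display dram bandwidth / num_heads)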
 */
static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
{
	if (dce_v8_0_average_bandwidth(wm) <=
	    (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v8_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v8_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
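 * Two watermark sets are computed: watermark A for the high (performance)
 * clocks and watermark B for the low (power saving) clocks; the results are
 * also saved in the crtc for later use by DPM.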
 */
static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce8_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v8_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v8_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = wm_mask;
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/**
 * dce_v8_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
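 * Counts the enabled heads first, then adjusts the line buffer and programs
 * the watermarks for every display controller.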
 */
static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		mode = &adev->mode_info.crtcs[i]->base.mode;
		lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
		dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
					    lb_size, num_heads);
	}
}

static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 offset, tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		offset = adev->mode_info.audio.pin[i].offset;
		tmp = RREG32_AUDIO_ENDPT(offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (((tmp &
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
		AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v8_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->offset;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
	       (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
}

static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 tmp = 0, offset;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		if (connector->latency_present[1])
			tmp =
			(connector->video_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[1] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
	} else {
		if (connector->latency_present[0])
			tmp =
			(connector->video_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(connector->audio_latency[0] <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
		else
			tmp =
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
			(0 <<
			 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);

	}
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u32 offset, tmp;
	u8 *sadb = NULL;
	int sad_count;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
		 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
	/* set HDMI mode */
	tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
	if (sad_count)
		tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
	else
		tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
	WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 offset;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	offset = dig->afmt->pin->offset;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 value = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					value = (sad->channels <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
						(sad->byte2 <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
						(sad->freq <<
						 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		value |= (stereo_freqs <<
			  AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);

		WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
	}

	kfree(sads);
}

static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v8_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else if ((adev->asic_type == CHIP_KABINI) ||
		 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
		adev->mode_info.audio.num_pins = 3;
	else if ((adev->asic_type == CHIP_BONAIRE) ||
		 (adev->asic_type == CHIP_HAWAII)) /* BN/HW: 6 streams, 7 endpoints */
		adev->mode_info.audio.num_pins = 7;
	else
		adev->mode_info.audio.num_pins = 3;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		/* disable audio. it will be set up later */
		/* XXX remove once we switch to ip funcs */
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

/*
 * update the N and CTS parameters for a given pixel clock rate
 */
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;

	WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
	WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);

	WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
	WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);

	WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
	WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
}

/*
 * build a HDMI Video Info Frame
 */
static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
					       void *buffer, size_t size)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	uint32_t offset = dig->afmt->offset;
	uint8_t *frame = buffer + 3;
	uint8_t *header = buffer;

	WREG32(mmAFMT_AVI_INFO0 + offset,
	       frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + offset,
	       frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + offset,
	       frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + offset,
	       frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
}

static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	u32 dto_phase = 24 * 1000;
	u32 dto_modulo = clock;

	if (!dig || !dig->afmt)
		return;

	/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (coefficient of two integer numbers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
	WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
	WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
}

/*
 * update the info frames with the data from the current display mode
 */
static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	uint32_t offset, val;
	ssize_t err;
	int bpc = 8;

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	offset = dig->afmt->offset;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
	dce_v8_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v8_0_audio_set_dto(encoder, mode->clock);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);

	val = RREG32(mmHDMI_CONTROL + offset);
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
	val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;

	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
		val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}

	WREG32(mmHDMI_CONTROL + offset, val);

	WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
	       HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
	       HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */

	WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
	       HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */

	WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
	       AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */

	WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
	       (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */

	WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */

	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
	       (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
	       (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */

	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
	       AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */

	/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */

	if (bpc > 8)
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
	else
		WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
		       HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */

	dce_v8_0_afmt_update_ACR(encoder, mode->clock);

	WREG32(mmAFMT_60958_0 + offset,
	       (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));

	WREG32(mmAFMT_60958_1 + offset,
	       (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));

	WREG32(mmAFMT_60958_2 + offset,
	       (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
	       (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
	       (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
	       (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
	       (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
	       (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));

	dce_v8_0_audio_write_speaker_allocation(encoder);


	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v8_0_afmt_audio_select_pin(encoder);
	dce_v8_0_audio_write_sad_regs(encoder);
	dce_v8_0_audio_write_latency_fields(encoder, mode);

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}
hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); 1652 if (err < 0) { 1653 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); 1654 return; 1655 } 1656 1657 dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 1658 1659 WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset, 1660 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */ 1661 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for audio info values to be updated */ 1662 1663 WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset, 1664 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */ 1665 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK); 1666 1667 WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset, 1668 AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */ 1669 1670 WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF); 1671 WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF); 1672 WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001); 1673 WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001); 1674 1675 /* enable audio after setting up hw */ 1676 dce_v8_0_audio_enable(adev, dig->afmt->pin, true); 1677 } 1678 1679 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1680 { 1681 struct drm_device *dev = encoder->dev; 1682 struct amdgpu_device *adev = drm_to_adev(dev); 1683 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1684 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1685 1686 if (!dig || !dig->afmt) 1687 return; 1688 1689 /* Silent, r600_hdmi_enable will raise WARN for us */ 1690 if (enable && dig->afmt->enabled) 1691 return; 1692 if (!enable && !dig->afmt->enabled) 1693 return; 1694 1695 if (!enable && dig->afmt->pin) { 1696 dce_v8_0_audio_enable(adev, dig->afmt->pin, false); 1697 dig->afmt->pin = NULL; 1698 } 1699 1700 dig->afmt->enabled = enable; 1701 1702 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1703 enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1704 } 1705 1706 static int dce_v8_0_afmt_init(struct amdgpu_device *adev) 1707 { 1708 int i; 1709 1710 for (i = 0; i < adev->mode_info.num_dig; i++) 1711 adev->mode_info.afmt[i] = NULL; 1712 1713 /* DCE8 has audio blocks tied to DIG encoders */ 1714 for (i = 0; i < adev->mode_info.num_dig; i++) { 1715 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1716 if (adev->mode_info.afmt[i]) { 1717 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1718 adev->mode_info.afmt[i]->id = i; 1719 } else { 1720 int j; 1721 for (j = 0; j < i; j++) { 1722 kfree(adev->mode_info.afmt[j]); 1723 adev->mode_info.afmt[j] = NULL; 1724 } 1725 return -ENOMEM; 1726 } 1727 } 1728 return 0; 1729 } 1730 1731 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev) 1732 { 1733 int i; 1734 1735 for (i = 0; i < adev->mode_info.num_dig; i++) { 1736 kfree(adev->mode_info.afmt[i]); 1737 adev->mode_info.afmt[i] = NULL; 1738 } 1739 } 1740 1741 static const u32 vga_control_regs[6] = 1742 { 1743 mmD1VGA_CONTROL, 1744 mmD2VGA_CONTROL, 1745 mmD3VGA_CONTROL, 1746 mmD4VGA_CONTROL, 1747 mmD5VGA_CONTROL, 1748 mmD6VGA_CONTROL, 1749 }; 1750 1751 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable) 1752 { 1753 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1754 struct drm_device *dev = crtc->dev; 1755 struct amdgpu_device *adev = drm_to_adev(dev); 1756 u32 vga_control; 1757 1758 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1759 if (enable) 1760 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 1761 else 1762 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 1763 } 1764 1765 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable) 1766 { 1767 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1768 struct drm_device *dev = crtc->dev; 1769 struct amdgpu_device *adev = drm_to_adev(dev); 1770 1771 if (enable) 1772 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 1773 else 1774 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 1775 } 1776 1777 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, 1778 struct drm_framebuffer *fb, 1779 int x, int y, int atomic) 1780 { 1781 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1782 struct drm_device *dev = crtc->dev; 1783 struct amdgpu_device *adev = drm_to_adev(dev); 1784 struct drm_framebuffer *target_fb; 1785 struct drm_gem_object *obj; 1786 struct amdgpu_bo *abo; 1787 uint64_t fb_location, tiling_flags; 1788 uint32_t fb_format, fb_pitch_pixels; 1789 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1790 u32 pipe_config; 1791 u32 viewport_w, viewport_h; 1792 int r; 1793 bool bypass_lut = false; 1794 struct drm_format_name_buf format_name; 1795 1796 /* no fb bound */ 1797 if (!atomic && !crtc->primary->fb) { 1798 DRM_DEBUG_KMS("No FB bound\n"); 1799 return 0; 1800 } 1801 1802 if (atomic) 1803 target_fb = fb; 1804 else 1805 target_fb = crtc->primary->fb; 1806 1807 /* If atomic, assume fb object is pinned & idle & fenced and 1808 * just update base pointers 1809 */ 1810 obj = target_fb->obj[0]; 1811 abo = gem_to_amdgpu_bo(obj); 1812 r = amdgpu_bo_reserve(abo, false); 1813 if (unlikely(r != 0)) 1814 return r; 1815 1816 if (!atomic) { 1817 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); 1818 if (unlikely(r != 0)) { 1819 amdgpu_bo_unreserve(abo); 1820 return -EINVAL; 1821 } 1822 } 1823 fb_location = amdgpu_bo_gpu_offset(abo); 1824 1825 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 
1826 amdgpu_bo_unreserve(abo); 1827 1828 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1829 1830 switch (target_fb->format->format) { 1831 case DRM_FORMAT_C8: 1832 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1833 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1834 break; 1835 case DRM_FORMAT_XRGB4444: 1836 case DRM_FORMAT_ARGB4444: 1837 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1838 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1839 #ifdef __BIG_ENDIAN 1840 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1841 #endif 1842 break; 1843 case DRM_FORMAT_XRGB1555: 1844 case DRM_FORMAT_ARGB1555: 1845 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1846 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1847 #ifdef __BIG_ENDIAN 1848 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1849 #endif 1850 break; 1851 case DRM_FORMAT_BGRX5551: 1852 case DRM_FORMAT_BGRA5551: 1853 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1854 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1855 #ifdef __BIG_ENDIAN 1856 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1857 #endif 1858 break; 1859 case DRM_FORMAT_RGB565: 1860 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1861 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1862 #ifdef __BIG_ENDIAN 1863 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1864 #endif 1865 break; 1866 case DRM_FORMAT_XRGB8888: 1867 case DRM_FORMAT_ARGB8888: 1868 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1869 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1870 #ifdef __BIG_ENDIAN 1871 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1872 #endif 1873 break; 1874 case DRM_FORMAT_XRGB2101010: 1875 case DRM_FORMAT_ARGB2101010: 1876 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1877 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1878 #ifdef __BIG_ENDIAN 1879 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1880 #endif 1881 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1882 bypass_lut = true; 1883 break; 1884 case DRM_FORMAT_BGRX1010102: 1885 case DRM_FORMAT_BGRA1010102: 1886 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1887 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1888 #ifdef __BIG_ENDIAN 1889 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1890 #endif 1891 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1892 bypass_lut = true; 1893 break; 1894 case DRM_FORMAT_XBGR8888: 1895 case DRM_FORMAT_ABGR8888: 1896 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | 1897 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); 1898 fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) | 1899 (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT)); 1900 #ifdef __BIG_ENDIAN 1901 fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); 1902 #endif 1903 break; 1904 default: 1905 DRM_ERROR("Unsupported screen format %s\n", 1906 drm_get_format_name(target_fb->format->format, &format_name)); 1907 return -EINVAL; 1908 } 1909 1910 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 1911 unsigned bankw, 
bankh, mtaspect, tile_split, num_banks; 1912 1913 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 1914 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 1915 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 1916 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 1917 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 1918 1919 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT); 1920 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); 1921 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT); 1922 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT); 1923 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT); 1924 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT); 1925 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT); 1926 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 1927 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT); 1928 } 1929 1930 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT); 1931 1932 dce_v8_0_vga_enable(crtc, false); 1933 1934 /* Make sure surface address is updated at vertical blank rather than 1935 * horizontal blank 1936 */ 1937 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0); 1938 1939 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1940 upper_32_bits(fb_location)); 1941 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1942 upper_32_bits(fb_location)); 1943 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1944 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1945 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1946 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); 1947 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 1948 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 1949 1950 /* 1951 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 1952 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 1953 * retain the full precision throughout the pipeline. 1954 */ 1955 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset, 1956 (bypass_lut ? 
LUT_10BIT_BYPASS_EN : 0), 1957 ~LUT_10BIT_BYPASS_EN); 1958 1959 if (bypass_lut) 1960 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 1961 1962 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 1963 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 1964 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 1965 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 1966 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 1967 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 1968 1969 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; 1970 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 1971 1972 dce_v8_0_grph_enable(crtc, true); 1973 1974 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 1975 target_fb->height); 1976 1977 x &= ~3; 1978 y &= ~1; 1979 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 1980 (x << 16) | y); 1981 viewport_w = crtc->mode.hdisplay; 1982 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1983 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 1984 (viewport_w << 16) | viewport_h); 1985 1986 /* set pageflip to happen anywhere in vblank interval */ 1987 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 1988 1989 if (!atomic && fb && fb != crtc->primary->fb) { 1990 abo = gem_to_amdgpu_bo(fb->obj[0]); 1991 r = amdgpu_bo_reserve(abo, true); 1992 if (unlikely(r != 0)) 1993 return r; 1994 amdgpu_bo_unpin(abo); 1995 amdgpu_bo_unreserve(abo); 1996 } 1997 1998 /* Bytes per pixel may have changed */ 1999 dce_v8_0_bandwidth_update(adev); 2000 2001 return 0; 2002 } 2003 2004 static void dce_v8_0_set_interleave(struct drm_crtc *crtc, 2005 struct drm_display_mode *mode) 2006 { 2007 struct drm_device *dev = crtc->dev; 2008 struct amdgpu_device *adev = drm_to_adev(dev); 2009 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2010 2011 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2012 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 2013 LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT); 2014 else 2015 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 2016 } 2017 2018 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc) 2019 { 2020 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2021 struct drm_device *dev = crtc->dev; 2022 struct amdgpu_device *adev = drm_to_adev(dev); 2023 u16 *r, *g, *b; 2024 int i; 2025 2026 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2027 2028 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2029 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 2030 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 2031 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 2032 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 2033 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 2034 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 2035 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2036 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 2037 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 2038 2039 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2040 2041 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2042 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2043 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2044 2045 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2046 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + 
amdgpu_crtc->crtc_offset, 0xffff); 2047 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2048 2049 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2050 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2051 2052 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2053 r = crtc->gamma_store; 2054 g = r + crtc->gamma_size; 2055 b = g + crtc->gamma_size; 2056 for (i = 0; i < 256; i++) { 2057 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2058 ((*r++ & 0xffc0) << 14) | 2059 ((*g++ & 0xffc0) << 4) | 2060 (*b++ >> 6)); 2061 } 2062 2063 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2064 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 2065 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 2066 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 2067 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 2068 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 2069 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 2070 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2071 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 2072 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 2073 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2074 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 2075 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 2076 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 2077 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 2078 /* XXX this only needs to be programmed once per crtc at startup, 2079 * not sure where the best place for it is 2080 */ 2081 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, 2082 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK); 2083 } 2084 2085 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) 2086 { 2087 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2088 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2089 2090 switch (amdgpu_encoder->encoder_id) { 2091 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2092 if (dig->linkb) 2093 return 1; 2094 else 2095 return 0; 2096 break; 2097 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2098 if (dig->linkb) 2099 return 3; 2100 else 2101 return 2; 2102 break; 2103 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2104 if (dig->linkb) 2105 return 5; 2106 else 2107 return 4; 2108 break; 2109 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2110 return 6; 2111 break; 2112 default: 2113 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2114 return 0; 2115 } 2116 } 2117 2118 /** 2119 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc. 2120 * 2121 * @crtc: drm crtc 2122 * 2123 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2124 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2125 * monitors a dedicated PPLL must be used. If a particular board has 2126 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2127 * as there is no need to program the PLL itself. If we are not able to 2128 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2129 * avoid messing up an existing monitor. 
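 *
 * Illustrative example (not taken from the code below): on a KB/ML part
 * driving two non-DP monitors with different pixel clocks, the first modeset
 * typically claims PPLL2 and the second PPLL1; monitors that share a pixel
 * clock reuse the same PPLL via amdgpu_pll_get_shared_nondp_ppll().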
2130 * 2131 * Asic specific PLL information 2132 * 2133 * DCE 8.x 2134 * KB/KV 2135 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2136 * CI 2137 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2138 * 2139 */ 2140 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc) 2141 { 2142 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2143 struct drm_device *dev = crtc->dev; 2144 struct amdgpu_device *adev = drm_to_adev(dev); 2145 u32 pll_in_use; 2146 int pll; 2147 2148 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2149 if (adev->clock.dp_extclk) 2150 /* skip PPLL programming if using ext clock */ 2151 return ATOM_PPLL_INVALID; 2152 else { 2153 /* use the same PPLL for all DP monitors */ 2154 pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2155 if (pll != ATOM_PPLL_INVALID) 2156 return pll; 2157 } 2158 } else { 2159 /* use the same PPLL for all monitors with the same clock */ 2160 pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2161 if (pll != ATOM_PPLL_INVALID) 2162 return pll; 2163 } 2164 /* otherwise, pick one of the plls */ 2165 if ((adev->asic_type == CHIP_KABINI) || 2166 (adev->asic_type == CHIP_MULLINS)) { 2167 /* KB/ML has PPLL1 and PPLL2 */ 2168 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2169 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2170 return ATOM_PPLL2; 2171 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2172 return ATOM_PPLL1; 2173 DRM_ERROR("unable to allocate a PPLL\n"); 2174 return ATOM_PPLL_INVALID; 2175 } else { 2176 /* CI/KV has PPLL0, PPLL1, and PPLL2 */ 2177 pll_in_use = amdgpu_pll_get_use_mask(crtc); 2178 if (!(pll_in_use & (1 << ATOM_PPLL2))) 2179 return ATOM_PPLL2; 2180 if (!(pll_in_use & (1 << ATOM_PPLL1))) 2181 return ATOM_PPLL1; 2182 if (!(pll_in_use & (1 << ATOM_PPLL0))) 2183 return ATOM_PPLL0; 2184 DRM_ERROR("unable to allocate a PPLL\n"); 2185 return ATOM_PPLL_INVALID; 2186 } 2187 return ATOM_PPLL_INVALID; 2188 } 2189 2190 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2191 { 2192 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2193 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2194 uint32_t cur_lock; 2195 2196 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2197 if (lock) 2198 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2199 else 2200 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK; 2201 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2202 } 2203 2204 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc) 2205 { 2206 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2207 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2208 2209 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2210 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2211 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2212 } 2213 2214 static void dce_v8_0_show_cursor(struct drm_crtc *crtc) 2215 { 2216 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2217 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2218 2219 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2220 upper_32_bits(amdgpu_crtc->cursor_addr)); 2221 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2222 lower_32_bits(amdgpu_crtc->cursor_addr)); 2223 2224 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, 2225 CUR_CONTROL__CURSOR_EN_MASK | 2226 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) | 2227 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT)); 2228 } 2229 2230 static int 
dce_v8_0_cursor_move_locked(struct drm_crtc *crtc, 2231 int x, int y) 2232 { 2233 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2234 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2235 int xorigin = 0, yorigin = 0; 2236 2237 amdgpu_crtc->cursor_x = x; 2238 amdgpu_crtc->cursor_y = y; 2239 2240 /* avivo cursor are offset into the total surface */ 2241 x += crtc->x; 2242 y += crtc->y; 2243 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2244 2245 if (x < 0) { 2246 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2247 x = 0; 2248 } 2249 if (y < 0) { 2250 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2251 y = 0; 2252 } 2253 2254 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2255 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2256 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2257 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2258 2259 return 0; 2260 } 2261 2262 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc, 2263 int x, int y) 2264 { 2265 int ret; 2266 2267 dce_v8_0_lock_cursor(crtc, true); 2268 ret = dce_v8_0_cursor_move_locked(crtc, x, y); 2269 dce_v8_0_lock_cursor(crtc, false); 2270 2271 return ret; 2272 } 2273 2274 static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, 2275 struct drm_file *file_priv, 2276 uint32_t handle, 2277 uint32_t width, 2278 uint32_t height, 2279 int32_t hot_x, 2280 int32_t hot_y) 2281 { 2282 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2283 struct drm_gem_object *obj; 2284 struct amdgpu_bo *aobj; 2285 int ret; 2286 2287 if (!handle) { 2288 /* turn off cursor */ 2289 dce_v8_0_hide_cursor(crtc); 2290 obj = NULL; 2291 goto unpin; 2292 } 2293 2294 if ((width > amdgpu_crtc->max_cursor_width) || 2295 (height > amdgpu_crtc->max_cursor_height)) { 2296 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2297 return -EINVAL; 2298 } 2299 2300 obj = drm_gem_object_lookup(file_priv, handle); 2301 if (!obj) { 2302 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2303 return -ENOENT; 2304 } 2305 2306 aobj = gem_to_amdgpu_bo(obj); 2307 ret = amdgpu_bo_reserve(aobj, false); 2308 if (ret != 0) { 2309 drm_gem_object_put(obj); 2310 return ret; 2311 } 2312 2313 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 2314 amdgpu_bo_unreserve(aobj); 2315 if (ret) { 2316 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2317 drm_gem_object_put(obj); 2318 return ret; 2319 } 2320 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 2321 2322 dce_v8_0_lock_cursor(crtc, true); 2323 2324 if (width != amdgpu_crtc->cursor_width || 2325 height != amdgpu_crtc->cursor_height || 2326 hot_x != amdgpu_crtc->cursor_hot_x || 2327 hot_y != amdgpu_crtc->cursor_hot_y) { 2328 int x, y; 2329 2330 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2331 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2332 2333 dce_v8_0_cursor_move_locked(crtc, x, y); 2334 2335 amdgpu_crtc->cursor_width = width; 2336 amdgpu_crtc->cursor_height = height; 2337 amdgpu_crtc->cursor_hot_x = hot_x; 2338 amdgpu_crtc->cursor_hot_y = hot_y; 2339 } 2340 2341 dce_v8_0_show_cursor(crtc); 2342 dce_v8_0_lock_cursor(crtc, false); 2343 2344 unpin: 2345 if (amdgpu_crtc->cursor_bo) { 2346 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2347 ret = amdgpu_bo_reserve(aobj, true); 2348 if (likely(ret == 0)) { 2349 amdgpu_bo_unpin(aobj); 2350 amdgpu_bo_unreserve(aobj); 2351 } 2352 
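		/* Drop the reference taken on the previous cursor BO by
		 * drm_gem_object_lookup(); the new object (or NULL when the
		 * cursor is being turned off) becomes the tracked cursor_bo
		 * below.
		 */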
drm_gem_object_put(amdgpu_crtc->cursor_bo); 2353 } 2354 2355 amdgpu_crtc->cursor_bo = obj; 2356 return 0; 2357 } 2358 2359 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) 2360 { 2361 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2362 2363 if (amdgpu_crtc->cursor_bo) { 2364 dce_v8_0_lock_cursor(crtc, true); 2365 2366 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2367 amdgpu_crtc->cursor_y); 2368 2369 dce_v8_0_show_cursor(crtc); 2370 2371 dce_v8_0_lock_cursor(crtc, false); 2372 } 2373 } 2374 2375 static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2376 u16 *blue, uint32_t size, 2377 struct drm_modeset_acquire_ctx *ctx) 2378 { 2379 dce_v8_0_crtc_load_lut(crtc); 2380 2381 return 0; 2382 } 2383 2384 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) 2385 { 2386 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2387 2388 drm_crtc_cleanup(crtc); 2389 kfree(amdgpu_crtc); 2390 } 2391 2392 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = { 2393 .cursor_set2 = dce_v8_0_crtc_cursor_set2, 2394 .cursor_move = dce_v8_0_crtc_cursor_move, 2395 .gamma_set = dce_v8_0_crtc_gamma_set, 2396 .set_config = amdgpu_display_crtc_set_config, 2397 .destroy = dce_v8_0_crtc_destroy, 2398 .page_flip_target = amdgpu_display_crtc_page_flip_target, 2399 .get_vblank_counter = amdgpu_get_vblank_counter_kms, 2400 .enable_vblank = amdgpu_enable_vblank_kms, 2401 .disable_vblank = amdgpu_disable_vblank_kms, 2402 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 2403 }; 2404 2405 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2406 { 2407 struct drm_device *dev = crtc->dev; 2408 struct amdgpu_device *adev = drm_to_adev(dev); 2409 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2410 unsigned type; 2411 2412 switch (mode) { 2413 case DRM_MODE_DPMS_ON: 2414 amdgpu_crtc->enabled = true; 2415 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2416 dce_v8_0_vga_enable(crtc, true); 2417 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2418 dce_v8_0_vga_enable(crtc, false); 2419 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2420 type = amdgpu_display_crtc_idx_to_irq_type(adev, 2421 amdgpu_crtc->crtc_id); 2422 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2423 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2424 drm_crtc_vblank_on(crtc); 2425 dce_v8_0_crtc_load_lut(crtc); 2426 break; 2427 case DRM_MODE_DPMS_STANDBY: 2428 case DRM_MODE_DPMS_SUSPEND: 2429 case DRM_MODE_DPMS_OFF: 2430 drm_crtc_vblank_off(crtc); 2431 if (amdgpu_crtc->enabled) { 2432 dce_v8_0_vga_enable(crtc, true); 2433 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2434 dce_v8_0_vga_enable(crtc, false); 2435 } 2436 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2437 amdgpu_crtc->enabled = false; 2438 break; 2439 } 2440 /* adjust pm to dpms */ 2441 amdgpu_pm_compute_clocks(adev); 2442 } 2443 2444 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc) 2445 { 2446 /* disable crtc pair power gating before programming */ 2447 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2448 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2449 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2450 } 2451 2452 static void dce_v8_0_crtc_commit(struct drm_crtc *crtc) 2453 { 2454 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2455 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2456 } 2457 2458 static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) 2459 { 2460 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2461 struct drm_device *dev = crtc->dev; 2462 
struct amdgpu_device *adev = drm_to_adev(dev); 2463 struct amdgpu_atom_ss ss; 2464 int i; 2465 2466 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2467 if (crtc->primary->fb) { 2468 int r; 2469 struct amdgpu_bo *abo; 2470 2471 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); 2472 r = amdgpu_bo_reserve(abo, true); 2473 if (unlikely(r)) 2474 DRM_ERROR("failed to reserve abo before unpin\n"); 2475 else { 2476 amdgpu_bo_unpin(abo); 2477 amdgpu_bo_unreserve(abo); 2478 } 2479 } 2480 /* disable the GRPH */ 2481 dce_v8_0_grph_enable(crtc, false); 2482 2483 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2484 2485 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2486 if (adev->mode_info.crtcs[i] && 2487 adev->mode_info.crtcs[i]->enabled && 2488 i != amdgpu_crtc->crtc_id && 2489 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2490 /* one other crtc is using this pll don't turn 2491 * off the pll 2492 */ 2493 goto done; 2494 } 2495 } 2496 2497 switch (amdgpu_crtc->pll_id) { 2498 case ATOM_PPLL1: 2499 case ATOM_PPLL2: 2500 /* disable the ppll */ 2501 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2502 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2503 break; 2504 case ATOM_PPLL0: 2505 /* disable the ppll */ 2506 if ((adev->asic_type == CHIP_KAVERI) || 2507 (adev->asic_type == CHIP_BONAIRE) || 2508 (adev->asic_type == CHIP_HAWAII)) 2509 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2510 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2511 break; 2512 default: 2513 break; 2514 } 2515 done: 2516 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2517 amdgpu_crtc->adjusted_clock = 0; 2518 amdgpu_crtc->encoder = NULL; 2519 amdgpu_crtc->connector = NULL; 2520 } 2521 2522 static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc, 2523 struct drm_display_mode *mode, 2524 struct drm_display_mode *adjusted_mode, 2525 int x, int y, struct drm_framebuffer *old_fb) 2526 { 2527 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2528 2529 if (!amdgpu_crtc->adjusted_clock) 2530 return -EINVAL; 2531 2532 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2533 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2534 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2535 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2536 amdgpu_atombios_crtc_scaler_setup(crtc); 2537 dce_v8_0_cursor_reset(crtc); 2538 /* update the hw version fpr dpm */ 2539 amdgpu_crtc->hw_mode = *adjusted_mode; 2540 2541 return 0; 2542 } 2543 2544 static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc, 2545 const struct drm_display_mode *mode, 2546 struct drm_display_mode *adjusted_mode) 2547 { 2548 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2549 struct drm_device *dev = crtc->dev; 2550 struct drm_encoder *encoder; 2551 2552 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2554 if (encoder->crtc == crtc) { 2555 amdgpu_crtc->encoder = encoder; 2556 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2557 break; 2558 } 2559 } 2560 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2561 amdgpu_crtc->encoder = NULL; 2562 amdgpu_crtc->connector = NULL; 2563 return false; 2564 } 2565 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2566 return false; 2567 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2568 return false; 2569 /* pick pll */ 2570 amdgpu_crtc->pll_id = 
dce_v8_0_pick_pll(crtc); 2571 /* if we can't get a PPLL for a non-DP encoder, fail */ 2572 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2573 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2574 return false; 2575 2576 return true; 2577 } 2578 2579 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2580 struct drm_framebuffer *old_fb) 2581 { 2582 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2583 } 2584 2585 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2586 struct drm_framebuffer *fb, 2587 int x, int y, enum mode_set_atomic state) 2588 { 2589 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1); 2590 } 2591 2592 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = { 2593 .dpms = dce_v8_0_crtc_dpms, 2594 .mode_fixup = dce_v8_0_crtc_mode_fixup, 2595 .mode_set = dce_v8_0_crtc_mode_set, 2596 .mode_set_base = dce_v8_0_crtc_set_base, 2597 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic, 2598 .prepare = dce_v8_0_crtc_prepare, 2599 .commit = dce_v8_0_crtc_commit, 2600 .disable = dce_v8_0_crtc_disable, 2601 .get_scanout_position = amdgpu_crtc_get_scanout_position, 2602 }; 2603 2604 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index) 2605 { 2606 struct amdgpu_crtc *amdgpu_crtc; 2607 2608 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2609 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2610 if (amdgpu_crtc == NULL) 2611 return -ENOMEM; 2612 2613 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs); 2614 2615 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2616 amdgpu_crtc->crtc_id = index; 2617 adev->mode_info.crtcs[index] = amdgpu_crtc; 2618 2619 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH; 2620 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT; 2621 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2622 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2623 2624 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id]; 2625 2626 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2627 amdgpu_crtc->adjusted_clock = 0; 2628 amdgpu_crtc->encoder = NULL; 2629 amdgpu_crtc->connector = NULL; 2630 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs); 2631 2632 return 0; 2633 } 2634 2635 static int dce_v8_0_early_init(void *handle) 2636 { 2637 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2638 2639 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg; 2640 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg; 2641 2642 dce_v8_0_set_display_funcs(adev); 2643 2644 adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev); 2645 2646 switch (adev->asic_type) { 2647 case CHIP_BONAIRE: 2648 case CHIP_HAWAII: 2649 adev->mode_info.num_hpd = 6; 2650 adev->mode_info.num_dig = 6; 2651 break; 2652 case CHIP_KAVERI: 2653 adev->mode_info.num_hpd = 6; 2654 adev->mode_info.num_dig = 7; 2655 break; 2656 case CHIP_KABINI: 2657 case CHIP_MULLINS: 2658 adev->mode_info.num_hpd = 6; 2659 adev->mode_info.num_dig = 6; /* ? 
*/ 2660 break; 2661 default: 2662 /* FIXME: not supported yet */ 2663 return -EINVAL; 2664 } 2665 2666 dce_v8_0_set_irq_funcs(adev); 2667 2668 return 0; 2669 } 2670 2671 static int dce_v8_0_sw_init(void *handle) 2672 { 2673 int r, i; 2674 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2675 2676 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2677 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); 2678 if (r) 2679 return r; 2680 } 2681 2682 for (i = 8; i < 20; i += 2) { 2683 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2684 if (r) 2685 return r; 2686 } 2687 2688 /* HPD hotplug */ 2689 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq); 2690 if (r) 2691 return r; 2692 2693 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; 2694 2695 adev_to_drm(adev)->mode_config.async_page_flip = true; 2696 2697 adev_to_drm(adev)->mode_config.max_width = 16384; 2698 adev_to_drm(adev)->mode_config.max_height = 16384; 2699 2700 adev_to_drm(adev)->mode_config.preferred_depth = 24; 2701 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 2702 2703 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; 2704 2705 r = amdgpu_display_modeset_create_props(adev); 2706 if (r) 2707 return r; 2708 2709 adev_to_drm(adev)->mode_config.max_width = 16384; 2710 adev_to_drm(adev)->mode_config.max_height = 16384; 2711 2712 /* allocate crtcs */ 2713 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2714 r = dce_v8_0_crtc_init(adev, i); 2715 if (r) 2716 return r; 2717 } 2718 2719 if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2720 amdgpu_display_print_display_setup(adev_to_drm(adev)); 2721 else 2722 return -EINVAL; 2723 2724 /* setup afmt */ 2725 r = dce_v8_0_afmt_init(adev); 2726 if (r) 2727 return r; 2728 2729 r = dce_v8_0_audio_init(adev); 2730 if (r) 2731 return r; 2732 2733 drm_kms_helper_poll_init(adev_to_drm(adev)); 2734 2735 adev->mode_info.mode_config_initialized = true; 2736 return 0; 2737 } 2738 2739 static int dce_v8_0_sw_fini(void *handle) 2740 { 2741 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2742 2743 kfree(adev->mode_info.bios_hardcoded_edid); 2744 2745 drm_kms_helper_poll_fini(adev_to_drm(adev)); 2746 2747 dce_v8_0_audio_fini(adev); 2748 2749 dce_v8_0_afmt_fini(adev); 2750 2751 drm_mode_config_cleanup(adev_to_drm(adev)); 2752 adev->mode_info.mode_config_initialized = false; 2753 2754 return 0; 2755 } 2756 2757 static int dce_v8_0_hw_init(void *handle) 2758 { 2759 int i; 2760 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2761 2762 /* disable vga render */ 2763 dce_v8_0_set_vga_render_state(adev, false); 2764 /* init dig PHYs, disp eng pll */ 2765 amdgpu_atombios_encoder_init_dig(adev); 2766 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2767 2768 /* initialize hpd */ 2769 dce_v8_0_hpd_init(adev); 2770 2771 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2772 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2773 } 2774 2775 dce_v8_0_pageflip_interrupt_init(adev); 2776 2777 return 0; 2778 } 2779 2780 static int dce_v8_0_hw_fini(void *handle) 2781 { 2782 int i; 2783 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2784 2785 dce_v8_0_hpd_fini(adev); 2786 2787 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2788 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2789 } 2790 2791 dce_v8_0_pageflip_interrupt_fini(adev); 2792 2793 return 0; 2794 } 2795 2796 static int 
dce_v8_0_suspend(void *handle) 2797 { 2798 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2799 2800 adev->mode_info.bl_level = 2801 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); 2802 2803 return dce_v8_0_hw_fini(handle); 2804 } 2805 2806 static int dce_v8_0_resume(void *handle) 2807 { 2808 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2809 int ret; 2810 2811 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, 2812 adev->mode_info.bl_level); 2813 2814 ret = dce_v8_0_hw_init(handle); 2815 2816 /* turn on the BL */ 2817 if (adev->mode_info.bl_encoder) { 2818 u8 bl_level = amdgpu_display_backlight_get_level(adev, 2819 adev->mode_info.bl_encoder); 2820 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2821 bl_level); 2822 } 2823 2824 return ret; 2825 } 2826 2827 static bool dce_v8_0_is_idle(void *handle) 2828 { 2829 return true; 2830 } 2831 2832 static int dce_v8_0_wait_for_idle(void *handle) 2833 { 2834 return 0; 2835 } 2836 2837 static int dce_v8_0_soft_reset(void *handle) 2838 { 2839 u32 srbm_soft_reset = 0, tmp; 2840 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2841 2842 if (dce_v8_0_is_display_hung(adev)) 2843 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 2844 2845 if (srbm_soft_reset) { 2846 tmp = RREG32(mmSRBM_SOFT_RESET); 2847 tmp |= srbm_soft_reset; 2848 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 2849 WREG32(mmSRBM_SOFT_RESET, tmp); 2850 tmp = RREG32(mmSRBM_SOFT_RESET); 2851 2852 udelay(50); 2853 2854 tmp &= ~srbm_soft_reset; 2855 WREG32(mmSRBM_SOFT_RESET, tmp); 2856 tmp = RREG32(mmSRBM_SOFT_RESET); 2857 2858 /* Wait a little for things to settle down */ 2859 udelay(50); 2860 } 2861 return 0; 2862 } 2863 2864 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 2865 int crtc, 2866 enum amdgpu_interrupt_state state) 2867 { 2868 u32 reg_block, lb_interrupt_mask; 2869 2870 if (crtc >= adev->mode_info.num_crtc) { 2871 DRM_DEBUG("invalid crtc %d\n", crtc); 2872 return; 2873 } 2874 2875 switch (crtc) { 2876 case 0: 2877 reg_block = CRTC0_REGISTER_OFFSET; 2878 break; 2879 case 1: 2880 reg_block = CRTC1_REGISTER_OFFSET; 2881 break; 2882 case 2: 2883 reg_block = CRTC2_REGISTER_OFFSET; 2884 break; 2885 case 3: 2886 reg_block = CRTC3_REGISTER_OFFSET; 2887 break; 2888 case 4: 2889 reg_block = CRTC4_REGISTER_OFFSET; 2890 break; 2891 case 5: 2892 reg_block = CRTC5_REGISTER_OFFSET; 2893 break; 2894 default: 2895 DRM_DEBUG("invalid crtc %d\n", crtc); 2896 return; 2897 } 2898 2899 switch (state) { 2900 case AMDGPU_IRQ_STATE_DISABLE: 2901 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 2902 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK; 2903 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 2904 break; 2905 case AMDGPU_IRQ_STATE_ENABLE: 2906 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 2907 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK; 2908 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 2909 break; 2910 default: 2911 break; 2912 } 2913 } 2914 2915 static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 2916 int crtc, 2917 enum amdgpu_interrupt_state state) 2918 { 2919 u32 reg_block, lb_interrupt_mask; 2920 2921 if (crtc >= adev->mode_info.num_crtc) { 2922 DRM_DEBUG("invalid crtc %d\n", crtc); 2923 return; 2924 } 2925 2926 switch (crtc) { 2927 case 0: 2928 reg_block = CRTC0_REGISTER_OFFSET; 2929 break; 2930 case 1: 2931 reg_block = CRTC1_REGISTER_OFFSET; 
2932 break; 2933 case 2: 2934 reg_block = CRTC2_REGISTER_OFFSET; 2935 break; 2936 case 3: 2937 reg_block = CRTC3_REGISTER_OFFSET; 2938 break; 2939 case 4: 2940 reg_block = CRTC4_REGISTER_OFFSET; 2941 break; 2942 case 5: 2943 reg_block = CRTC5_REGISTER_OFFSET; 2944 break; 2945 default: 2946 DRM_DEBUG("invalid crtc %d\n", crtc); 2947 return; 2948 } 2949 2950 switch (state) { 2951 case AMDGPU_IRQ_STATE_DISABLE: 2952 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 2953 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK; 2954 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 2955 break; 2956 case AMDGPU_IRQ_STATE_ENABLE: 2957 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block); 2958 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK; 2959 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask); 2960 break; 2961 default: 2962 break; 2963 } 2964 } 2965 2966 static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev, 2967 struct amdgpu_irq_src *src, 2968 unsigned type, 2969 enum amdgpu_interrupt_state state) 2970 { 2971 u32 dc_hpd_int_cntl; 2972 2973 if (type >= adev->mode_info.num_hpd) { 2974 DRM_DEBUG("invalid hdp %d\n", type); 2975 return 0; 2976 } 2977 2978 switch (state) { 2979 case AMDGPU_IRQ_STATE_DISABLE: 2980 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); 2981 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 2982 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2983 break; 2984 case AMDGPU_IRQ_STATE_ENABLE: 2985 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]); 2986 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK; 2987 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl); 2988 break; 2989 default: 2990 break; 2991 } 2992 2993 return 0; 2994 } 2995 2996 static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev, 2997 struct amdgpu_irq_src *src, 2998 unsigned type, 2999 enum amdgpu_interrupt_state state) 3000 { 3001 switch (type) { 3002 case AMDGPU_CRTC_IRQ_VBLANK1: 3003 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3004 break; 3005 case AMDGPU_CRTC_IRQ_VBLANK2: 3006 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3007 break; 3008 case AMDGPU_CRTC_IRQ_VBLANK3: 3009 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3010 break; 3011 case AMDGPU_CRTC_IRQ_VBLANK4: 3012 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3013 break; 3014 case AMDGPU_CRTC_IRQ_VBLANK5: 3015 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3016 break; 3017 case AMDGPU_CRTC_IRQ_VBLANK6: 3018 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3019 break; 3020 case AMDGPU_CRTC_IRQ_VLINE1: 3021 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state); 3022 break; 3023 case AMDGPU_CRTC_IRQ_VLINE2: 3024 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state); 3025 break; 3026 case AMDGPU_CRTC_IRQ_VLINE3: 3027 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state); 3028 break; 3029 case AMDGPU_CRTC_IRQ_VLINE4: 3030 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state); 3031 break; 3032 case AMDGPU_CRTC_IRQ_VLINE5: 3033 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state); 3034 break; 3035 case AMDGPU_CRTC_IRQ_VLINE6: 3036 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state); 3037 break; 3038 default: 3039 break; 3040 } 3041 return 0; 3042 } 3043 3044 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev, 3045 struct amdgpu_irq_src *source, 3046 struct amdgpu_iv_entry *entry) 
3047 { 3048 unsigned crtc = entry->src_id - 1; 3049 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3050 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, 3051 crtc); 3052 3053 switch (entry->src_data[0]) { 3054 case 0: /* vblank */ 3055 if (disp_int & interrupt_status_offsets[crtc].vblank) 3056 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK); 3057 else 3058 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3059 3060 if (amdgpu_irq_enabled(adev, source, irq_type)) { 3061 drm_handle_vblank(adev_to_drm(adev), crtc); 3062 } 3063 DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3064 break; 3065 case 1: /* vline */ 3066 if (disp_int & interrupt_status_offsets[crtc].vline) 3067 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK); 3068 else 3069 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3070 3071 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3072 break; 3073 default: 3074 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3075 break; 3076 } 3077 3078 return 0; 3079 } 3080 3081 static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, 3082 struct amdgpu_irq_src *src, 3083 unsigned type, 3084 enum amdgpu_interrupt_state state) 3085 { 3086 u32 reg; 3087 3088 if (type >= adev->mode_info.num_crtc) { 3089 DRM_ERROR("invalid pageflip crtc %d\n", type); 3090 return -EINVAL; 3091 } 3092 3093 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3094 if (state == AMDGPU_IRQ_STATE_DISABLE) 3095 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3096 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3097 else 3098 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3099 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3100 3101 return 0; 3102 } 3103 3104 static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, 3105 struct amdgpu_irq_src *source, 3106 struct amdgpu_iv_entry *entry) 3107 { 3108 unsigned long flags; 3109 unsigned crtc_id; 3110 struct amdgpu_crtc *amdgpu_crtc; 3111 struct amdgpu_flip_work *works; 3112 3113 crtc_id = (entry->src_id - 8) >> 1; 3114 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3115 3116 if (crtc_id >= adev->mode_info.num_crtc) { 3117 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3118 return -EINVAL; 3119 } 3120 3121 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3122 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3123 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3124 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3125 3126 /* IRQ could occur when in initial stage */ 3127 if (amdgpu_crtc == NULL) 3128 return 0; 3129 3130 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 3131 works = amdgpu_crtc->pflip_works; 3132 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3133 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3134 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3135 amdgpu_crtc->pflip_status, 3136 AMDGPU_FLIP_SUBMITTED); 3137 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 3138 return 0; 3139 } 3140 3141 /* page flip completed. 
clean up */ 3142 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3143 amdgpu_crtc->pflip_works = NULL; 3144 3145 /* wakeup usersapce */ 3146 if (works->event) 3147 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); 3148 3149 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 3150 3151 drm_crtc_vblank_put(&amdgpu_crtc->base); 3152 schedule_work(&works->unpin_work); 3153 3154 return 0; 3155 } 3156 3157 static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, 3158 struct amdgpu_irq_src *source, 3159 struct amdgpu_iv_entry *entry) 3160 { 3161 uint32_t disp_int, mask, tmp; 3162 unsigned hpd; 3163 3164 if (entry->src_data[0] >= adev->mode_info.num_hpd) { 3165 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3166 return 0; 3167 } 3168 3169 hpd = entry->src_data[0]; 3170 disp_int = RREG32(interrupt_status_offsets[hpd].reg); 3171 mask = interrupt_status_offsets[hpd].hpd; 3172 3173 if (disp_int & mask) { 3174 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]); 3175 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK; 3176 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp); 3177 schedule_work(&adev->hotplug_work); 3178 DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3179 } 3180 3181 return 0; 3182 3183 } 3184 3185 static int dce_v8_0_set_clockgating_state(void *handle, 3186 enum amd_clockgating_state state) 3187 { 3188 return 0; 3189 } 3190 3191 static int dce_v8_0_set_powergating_state(void *handle, 3192 enum amd_powergating_state state) 3193 { 3194 return 0; 3195 } 3196 3197 static const struct amd_ip_funcs dce_v8_0_ip_funcs = { 3198 .name = "dce_v8_0", 3199 .early_init = dce_v8_0_early_init, 3200 .late_init = NULL, 3201 .sw_init = dce_v8_0_sw_init, 3202 .sw_fini = dce_v8_0_sw_fini, 3203 .hw_init = dce_v8_0_hw_init, 3204 .hw_fini = dce_v8_0_hw_fini, 3205 .suspend = dce_v8_0_suspend, 3206 .resume = dce_v8_0_resume, 3207 .is_idle = dce_v8_0_is_idle, 3208 .wait_for_idle = dce_v8_0_wait_for_idle, 3209 .soft_reset = dce_v8_0_soft_reset, 3210 .set_clockgating_state = dce_v8_0_set_clockgating_state, 3211 .set_powergating_state = dce_v8_0_set_powergating_state, 3212 }; 3213 3214 static void 3215 dce_v8_0_encoder_mode_set(struct drm_encoder *encoder, 3216 struct drm_display_mode *mode, 3217 struct drm_display_mode *adjusted_mode) 3218 { 3219 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3220 3221 amdgpu_encoder->pixel_clock = adjusted_mode->clock; 3222 3223 /* need to call this here rather than in prepare() since we need some crtc info */ 3224 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3225 3226 /* set scaler clears this on some chips */ 3227 dce_v8_0_set_interleave(encoder->crtc, mode); 3228 3229 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 3230 dce_v8_0_afmt_enable(encoder, true); 3231 dce_v8_0_afmt_setmode(encoder, adjusted_mode); 3232 } 3233 } 3234 3235 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder) 3236 { 3237 struct amdgpu_device *adev = drm_to_adev(encoder->dev); 3238 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3239 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3240 3241 if ((amdgpu_encoder->active_device & 3242 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3243 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3244 ENCODER_OBJECT_ID_NONE)) { 3245 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3246 if (dig) { 3247 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder); 3248 if 
(amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) 3249 dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3250 } 3251 } 3252 3253 amdgpu_atombios_scratch_regs_lock(adev, true); 3254 3255 if (connector) { 3256 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3257 3258 /* select the clock/data port if it uses a router */ 3259 if (amdgpu_connector->router.cd_valid) 3260 amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3261 3262 /* turn eDP panel on for mode set */ 3263 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3264 amdgpu_atombios_encoder_set_edp_panel_power(connector, 3265 ATOM_TRANSMITTER_ACTION_POWER_ON); 3266 } 3267 3268 /* this is needed for the pll/ss setup to work correctly in some cases */ 3269 amdgpu_atombios_encoder_set_crtc_source(encoder); 3270 /* set up the FMT blocks */ 3271 dce_v8_0_program_fmt(encoder); 3272 } 3273 3274 static void dce_v8_0_encoder_commit(struct drm_encoder *encoder) 3275 { 3276 struct drm_device *dev = encoder->dev; 3277 struct amdgpu_device *adev = drm_to_adev(dev); 3278 3279 /* need to call this here as we need the crtc set up */ 3280 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3281 amdgpu_atombios_scratch_regs_lock(adev, false); 3282 } 3283 3284 static void dce_v8_0_encoder_disable(struct drm_encoder *encoder) 3285 { 3286 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3287 struct amdgpu_encoder_atom_dig *dig; 3288 3289 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3290 3291 if (amdgpu_atombios_encoder_is_digital(encoder)) { 3292 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 3293 dce_v8_0_afmt_enable(encoder, false); 3294 dig = amdgpu_encoder->enc_priv; 3295 dig->dig_encoder = -1; 3296 } 3297 amdgpu_encoder->active_device = 0; 3298 } 3299 3300 /* these are handled by the primary encoders */ 3301 static void dce_v8_0_ext_prepare(struct drm_encoder *encoder) 3302 { 3303 3304 } 3305 3306 static void dce_v8_0_ext_commit(struct drm_encoder *encoder) 3307 { 3308 3309 } 3310 3311 static void 3312 dce_v8_0_ext_mode_set(struct drm_encoder *encoder, 3313 struct drm_display_mode *mode, 3314 struct drm_display_mode *adjusted_mode) 3315 { 3316 3317 } 3318 3319 static void dce_v8_0_ext_disable(struct drm_encoder *encoder) 3320 { 3321 3322 } 3323 3324 static void 3325 dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode) 3326 { 3327 3328 } 3329 3330 static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = { 3331 .dpms = dce_v8_0_ext_dpms, 3332 .prepare = dce_v8_0_ext_prepare, 3333 .mode_set = dce_v8_0_ext_mode_set, 3334 .commit = dce_v8_0_ext_commit, 3335 .disable = dce_v8_0_ext_disable, 3336 /* no detect for TMDS/LVDS yet */ 3337 }; 3338 3339 static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = { 3340 .dpms = amdgpu_atombios_encoder_dpms, 3341 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3342 .prepare = dce_v8_0_encoder_prepare, 3343 .mode_set = dce_v8_0_encoder_mode_set, 3344 .commit = dce_v8_0_encoder_commit, 3345 .disable = dce_v8_0_encoder_disable, 3346 .detect = amdgpu_atombios_encoder_dig_detect, 3347 }; 3348 3349 static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = { 3350 .dpms = amdgpu_atombios_encoder_dpms, 3351 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3352 .prepare = dce_v8_0_encoder_prepare, 3353 .mode_set = dce_v8_0_encoder_mode_set, 3354 .commit = dce_v8_0_encoder_commit, 3355 .detect = amdgpu_atombios_encoder_dac_detect, 3356 }; 3357 3358 static void 
dce_v8_0_encoder_destroy(struct drm_encoder *encoder) 3359 { 3360 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3361 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3362 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); 3363 kfree(amdgpu_encoder->enc_priv); 3364 drm_encoder_cleanup(encoder); 3365 kfree(amdgpu_encoder); 3366 } 3367 3368 static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = { 3369 .destroy = dce_v8_0_encoder_destroy, 3370 }; 3371 3372 static void dce_v8_0_encoder_add(struct amdgpu_device *adev, 3373 uint32_t encoder_enum, 3374 uint32_t supported_device, 3375 u16 caps) 3376 { 3377 struct drm_device *dev = adev_to_drm(adev); 3378 struct drm_encoder *encoder; 3379 struct amdgpu_encoder *amdgpu_encoder; 3380 3381 /* see if we already added it */ 3382 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3383 amdgpu_encoder = to_amdgpu_encoder(encoder); 3384 if (amdgpu_encoder->encoder_enum == encoder_enum) { 3385 amdgpu_encoder->devices |= supported_device; 3386 return; 3387 } 3388 3389 } 3390 3391 /* add a new one */ 3392 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); 3393 if (!amdgpu_encoder) 3394 return; 3395 3396 encoder = &amdgpu_encoder->base; 3397 switch (adev->mode_info.num_crtc) { 3398 case 1: 3399 encoder->possible_crtcs = 0x1; 3400 break; 3401 case 2: 3402 default: 3403 encoder->possible_crtcs = 0x3; 3404 break; 3405 case 4: 3406 encoder->possible_crtcs = 0xf; 3407 break; 3408 case 6: 3409 encoder->possible_crtcs = 0x3f; 3410 break; 3411 } 3412 3413 amdgpu_encoder->enc_priv = NULL; 3414 3415 amdgpu_encoder->encoder_enum = encoder_enum; 3416 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 3417 amdgpu_encoder->devices = supported_device; 3418 amdgpu_encoder->rmx_type = RMX_OFF; 3419 amdgpu_encoder->underscan_type = UNDERSCAN_OFF; 3420 amdgpu_encoder->is_ext_encoder = false; 3421 amdgpu_encoder->caps = caps; 3422 3423 switch (amdgpu_encoder->encoder_id) { 3424 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3425 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3426 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3427 DRM_MODE_ENCODER_DAC, NULL); 3428 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs); 3429 break; 3430 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3431 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 3432 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 3433 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 3434 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 3435 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3436 amdgpu_encoder->rmx_type = RMX_FULL; 3437 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3438 DRM_MODE_ENCODER_LVDS, NULL); 3439 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3440 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3441 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3442 DRM_MODE_ENCODER_DAC, NULL); 3443 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3444 } else { 3445 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs, 3446 DRM_MODE_ENCODER_TMDS, NULL); 3447 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3448 } 3449 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs); 3450 break; 3451 case ENCODER_OBJECT_ID_SI170B: 3452 case ENCODER_OBJECT_ID_CH7303: 3453 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: 3454 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: 3455 case ENCODER_OBJECT_ID_TITFP513: 3456 
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		amdgpu_encoder->is_ext_encoder = true;
		if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_LVDS, NULL);
		else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_DAC, NULL);
		else
			drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
					 DRM_MODE_ENCODER_TMDS, NULL);
		drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
		break;
	}
}

static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
	.bandwidth_update = &dce_v8_0_bandwidth_update,
	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
	.hpd_sense = &dce_v8_0_hpd_sense,
	.hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
	.hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
	.page_flip = &dce_v8_0_page_flip,
	.page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
	.add_encoder = &dce_v8_0_encoder_add,
	.add_connector = &amdgpu_connector_add,
};

static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v8_0_display_funcs;
}

static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
	.set = dce_v8_0_set_crtc_interrupt_state,
	.process = dce_v8_0_crtc_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
	.set = dce_v8_0_set_pageflip_interrupt_state,
	.process = dce_v8_0_pageflip_irq,
};

static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
	.set = dce_v8_0_set_hpd_interrupt_state,
	.process = dce_v8_0_hpd_irq,
};

static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;
	adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
}

const struct amdgpu_ip_block_version dce_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 2,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 3,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};

const struct amdgpu_ip_block_version dce_v8_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 8,
	.minor = 5,
	.rev = 0,
	.funcs = &dce_v8_0_ip_funcs,
};
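
/*
 * Usage sketch: the ip block versions above all share dce_v8_0_ip_funcs and
 * differ only in the major.minor revision they report, so the rest of the
 * driver can tell DCE 8.x variants apart.  Registration is done by the
 * per-ASIC SoC setup code, roughly along the lines of
 *
 *	amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
 *
 * with the variant (8.0/8.1/8.2/8.3/8.5) chosen to match the DCE revision
 * of the ASIC being brought up.
 */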