/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;
} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

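/*
 * The Azalia (audio) endpoint registers are not memory-mapped directly;
 * they sit behind an index/data register pair.  The two helpers below
 * serialize the index write and the data access with audio_endpt_idx_lock
 * so that concurrent callers cannot interleave and read or write through
 * a stale index.
 */
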
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
	       reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * During vblank we take the crtc lock and wait for the update_pending
 * bit to go high, when it does, we release the lock, and allow the
 * double buffered update to take place.
 * Returns the current update pending status.
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
	    DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS, to avoid breaking
			 * the aux dp channel on imac and to help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * also avoid interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	u32 tmp;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA rendering and any enabled CRTCs if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* Disable the CRTCs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (SI).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

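/*
 * Worked example with illustrative numbers (not taken from any real
 * board): for yclk = 800000 kHz and 2 DRAM channels, the raw bandwidth
 * above evaluates to (800000 / 1000) * (2 * 4) * 0.7 = 4480 MB/s.
 */
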
/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

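/*
 * Continuing the illustrative example above: with sclk = 1000000 kHz the
 * data return path allows 1000 * 32 * 0.8 = 25600 MB/s, and with
 * disp_clk = 600000 kHz the DMIF request path allows 600 * 32 * 0.8 =
 * 15360 MB/s, so dce_v6_0_available_bandwidth() would report
 * min(4480, 25600, 15360) = 4480 MB/s.
 */
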
/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (SI).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (SI).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

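/*
 * The latency model above is: fixed memory latency (estimated at 2000 ns)
 * plus the time the other heads may occupy the return path ((num_heads + 1)
 * worst-case chunk returns plus num_heads cursor line-pair returns) plus
 * the display pipe latency.  If the line buffer cannot be refilled within
 * one active line (line_fill_time > active_time), the shortfall is added
 * to the returned watermark.
 */
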
/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (SI).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

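/*
 * Taken together, the three checks above gate low-priority operation: a
 * mode runs at normal arbitration priority only if its average bandwidth
 * fits in the per-head share of both the display DRAM allocation and the
 * available bandwidth, and the line buffer can hide the worst-case
 * latency.  Any failure makes dce_v6_0_program_watermarks() below force
 * the CRTC's urgency watermarks to always-on.
 */
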
/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (SI).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		if (adev->pm.dpm_enabled) {
			/* watermark for low clocks */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

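/*
 * The priority marks computed above convert the latency watermark from
 * time into an equivalent pixel count at the current pixel clock, scaled
 * by the horizontal scale ratio; the final division by 16 suggests the
 * PRIORITY_*_CNT registers count in units of 16 pixels.
 */
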
/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;

	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

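/*
 * Concretely: the CRTCs are paired (0/1, 2/3, 4/5) on the three line
 * buffers.  When both CRTCs of a pair drive a mode, each gets half a
 * buffer (the 4096 * 2 case above); a single active CRTC gets the whole
 * buffer (8192 * 2).  The returned size is fed to the watermark code as
 * lb_size.
 */
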
/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (SI).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
				  PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
			     dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

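/*
 * Note that descriptors 8 and 12 are missing from the table above; going
 * by the CEA-861 audio coding-type numbering they would correspond to One
 * Bit Audio (DSD) and DST, which this block apparently does not report.
 */
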
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			    bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

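/*
 * For reference, the CTS/N pairs programmed above satisfy the HDMI audio
 * clock regeneration relation 128 * fs = f_TMDS * N / CTS.  For example,
 * 48 kHz audio on a 74.25 MHz TMDS clock typically uses N = 6144 and
 * CTS = 74250: 74.25 MHz * 6144 / 74250 = 6.144 MHz = 128 * 48 kHz.
 */
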
static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					     struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;
	uint8_t *header = buffer;
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
			    HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 tmp;

	/*
	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
	 * Express [24MHz / target pixel clock] as an exact rational
	 * number (coefficients of two integers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
			    DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 0);
	} else if (ENCODER_MODE_IS_DP(em)) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 1);
	}
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
	} else if (ENCODER_MODE_IS_DP(em)) {
		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
	}
}

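/*
 * Example (HDMI): a 1080p60 mode has clock = 148500 kHz, so the code above
 * programs PHASE = 24000 and MODULE = 148500; the DTO then scales the
 * pixel clock by 24000 / 148500, recovering the 24 MHz audio reference.
 */
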
static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
	WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	if (enable) {
		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
		WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
	} else {
		tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
		WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
	}
}

static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	if (enable) {
		tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
		WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

		tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
		WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);

		tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
	} else {
		WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
	}
}

1630 struct drm_device *dev = encoder->dev; 1631 struct amdgpu_device *adev = dev->dev_private; 1632 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1633 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1634 struct drm_connector *connector; 1635 struct amdgpu_connector *amdgpu_connector = NULL; 1636 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 1637 int bpc = 8; 1638 1639 if (!dig || !dig->afmt) 1640 return; 1641 1642 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 1643 if (connector->encoder == encoder) { 1644 amdgpu_connector = to_amdgpu_connector(connector); 1645 break; 1646 } 1647 } 1648 1649 if (!amdgpu_connector) { 1650 DRM_ERROR("Couldn't find encoder's connector\n"); 1651 return; 1652 } 1653 1654 if (!dig->afmt->enabled) 1655 return; 1656 1657 dig->afmt->pin = dce_v6_0_audio_get_pin(adev); 1658 if (!dig->afmt->pin) 1659 return; 1660 1661 if (encoder->crtc) { 1662 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1663 bpc = amdgpu_crtc->bpc; 1664 } 1665 1666 /* disable audio before setting up hw */ 1667 dce_v6_0_audio_enable(adev, dig->afmt->pin, false); 1668 1669 dce_v6_0_audio_set_mute(encoder, true); 1670 dce_v6_0_audio_write_speaker_allocation(encoder); 1671 dce_v6_0_audio_write_sad_regs(encoder); 1672 dce_v6_0_audio_write_latency_fields(encoder, mode); 1673 if (em == ATOM_ENCODER_MODE_HDMI) { 1674 dce_v6_0_audio_set_dto(encoder, mode->clock); 1675 dce_v6_0_audio_set_vbi_packet(encoder); 1676 dce_v6_0_audio_set_acr(encoder, mode->clock, bpc); 1677 } else if (ENCODER_MODE_IS_DP(em)) { 1678 dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10); 1679 } 1680 dce_v6_0_audio_set_packet(encoder); 1681 dce_v6_0_audio_select_pin(encoder); 1682 dce_v6_0_audio_set_avi_infoframe(encoder, mode); 1683 dce_v6_0_audio_set_mute(encoder, false); 1684 if (em == ATOM_ENCODER_MODE_HDMI) { 1685 dce_v6_0_audio_hdmi_enable(encoder, 1); 1686 } else if (ENCODER_MODE_IS_DP(em)) { 1687 dce_v6_0_audio_dp_enable(encoder, 1); 1688 } 1689 1690 /* enable audio after setting up hw */ 1691 dce_v6_0_audio_enable(adev, dig->afmt->pin, true); 1692 } 1693 1694 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1695 { 1696 struct drm_device *dev = encoder->dev; 1697 struct amdgpu_device *adev = dev->dev_private; 1698 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1699 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1700 1701 if (!dig || !dig->afmt) 1702 return; 1703 1704 /* Silent, r600_hdmi_enable will raise WARN for us */ 1705 if (enable && dig->afmt->enabled) 1706 return; 1707 1708 if (!enable && !dig->afmt->enabled) 1709 return; 1710 1711 if (!enable && dig->afmt->pin) { 1712 dce_v6_0_audio_enable(adev, dig->afmt->pin, false); 1713 dig->afmt->pin = NULL; 1714 } 1715 1716 dig->afmt->enabled = enable; 1717 1718 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1719 enable ? 
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1720 } 1721 1722 static int dce_v6_0_afmt_init(struct amdgpu_device *adev) 1723 { 1724 int i, j; 1725 1726 for (i = 0; i < adev->mode_info.num_dig; i++) 1727 adev->mode_info.afmt[i] = NULL; 1728 1729 /* DCE6 has audio blocks tied to DIG encoders */ 1730 for (i = 0; i < adev->mode_info.num_dig; i++) { 1731 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1732 if (adev->mode_info.afmt[i]) { 1733 adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1734 adev->mode_info.afmt[i]->id = i; 1735 } else { 1736 for (j = 0; j < i; j++) { 1737 kfree(adev->mode_info.afmt[j]); 1738 adev->mode_info.afmt[j] = NULL; 1739 } 1740 DRM_ERROR("Out of memory allocating afmt table\n"); 1741 return -ENOMEM; 1742 } 1743 } 1744 return 0; 1745 } 1746 1747 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev) 1748 { 1749 int i; 1750 1751 for (i = 0; i < adev->mode_info.num_dig; i++) { 1752 kfree(adev->mode_info.afmt[i]); 1753 adev->mode_info.afmt[i] = NULL; 1754 } 1755 } 1756 1757 static const u32 vga_control_regs[6] = 1758 { 1759 mmD1VGA_CONTROL, 1760 mmD2VGA_CONTROL, 1761 mmD3VGA_CONTROL, 1762 mmD4VGA_CONTROL, 1763 mmD5VGA_CONTROL, 1764 mmD6VGA_CONTROL, 1765 }; 1766 1767 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable) 1768 { 1769 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1770 struct drm_device *dev = crtc->dev; 1771 struct amdgpu_device *adev = dev->dev_private; 1772 u32 vga_control; 1773 1774 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1775 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0)); 1776 } 1777 1778 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable) 1779 { 1780 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1781 struct drm_device *dev = crtc->dev; 1782 struct amdgpu_device *adev = dev->dev_private; 1783 1784 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 
1 : 0); 1785 } 1786 1787 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, 1788 struct drm_framebuffer *fb, 1789 int x, int y, int atomic) 1790 { 1791 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1792 struct drm_device *dev = crtc->dev; 1793 struct amdgpu_device *adev = dev->dev_private; 1794 struct drm_framebuffer *target_fb; 1795 struct drm_gem_object *obj; 1796 struct amdgpu_bo *abo; 1797 uint64_t fb_location, tiling_flags; 1798 uint32_t fb_format, fb_pitch_pixels, pipe_config; 1799 u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE); 1800 u32 viewport_w, viewport_h; 1801 int r; 1802 bool bypass_lut = false; 1803 struct drm_format_name_buf format_name; 1804 1805 /* no fb bound */ 1806 if (!atomic && !crtc->primary->fb) { 1807 DRM_DEBUG_KMS("No FB bound\n"); 1808 return 0; 1809 } 1810 1811 if (atomic) 1812 target_fb = fb; 1813 else 1814 target_fb = crtc->primary->fb; 1815 1816 /* If atomic, assume fb object is pinned & idle & fenced and 1817 * just update base pointers 1818 */ 1819 obj = target_fb->obj[0]; 1820 abo = gem_to_amdgpu_bo(obj); 1821 r = amdgpu_bo_reserve(abo, false); 1822 if (unlikely(r != 0)) 1823 return r; 1824 1825 if (!atomic) { 1826 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); 1827 if (unlikely(r != 0)) { 1828 amdgpu_bo_unreserve(abo); 1829 return -EINVAL; 1830 } 1831 } 1832 fb_location = amdgpu_bo_gpu_offset(abo); 1833 1834 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1835 amdgpu_bo_unreserve(abo); 1836 1837 switch (target_fb->format->format) { 1838 case DRM_FORMAT_C8: 1839 fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) | 1840 GRPH_FORMAT(GRPH_FORMAT_INDEXED)); 1841 break; 1842 case DRM_FORMAT_XRGB4444: 1843 case DRM_FORMAT_ARGB4444: 1844 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1845 GRPH_FORMAT(GRPH_FORMAT_ARGB4444)); 1846 #ifdef __BIG_ENDIAN 1847 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1848 #endif 1849 break; 1850 case DRM_FORMAT_XRGB1555: 1851 case DRM_FORMAT_ARGB1555: 1852 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1853 GRPH_FORMAT(GRPH_FORMAT_ARGB1555)); 1854 #ifdef __BIG_ENDIAN 1855 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1856 #endif 1857 break; 1858 case DRM_FORMAT_BGRX5551: 1859 case DRM_FORMAT_BGRA5551: 1860 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1861 GRPH_FORMAT(GRPH_FORMAT_BGRA5551)); 1862 #ifdef __BIG_ENDIAN 1863 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1864 #endif 1865 break; 1866 case DRM_FORMAT_RGB565: 1867 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) | 1868 GRPH_FORMAT(GRPH_FORMAT_ARGB565)); 1869 #ifdef __BIG_ENDIAN 1870 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16); 1871 #endif 1872 break; 1873 case DRM_FORMAT_XRGB8888: 1874 case DRM_FORMAT_ARGB8888: 1875 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1876 GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); 1877 #ifdef __BIG_ENDIAN 1878 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1879 #endif 1880 break; 1881 case DRM_FORMAT_XRGB2101010: 1882 case DRM_FORMAT_ARGB2101010: 1883 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1884 GRPH_FORMAT(GRPH_FORMAT_ARGB2101010)); 1885 #ifdef __BIG_ENDIAN 1886 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1887 #endif 1888 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1889 bypass_lut = true; 1890 break; 1891 case DRM_FORMAT_BGRX1010102: 1892 case DRM_FORMAT_BGRA1010102: 1893 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1894 GRPH_FORMAT(GRPH_FORMAT_BGRA1010102)); 1895 #ifdef __BIG_ENDIAN 1896 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1897 #endif 1898 /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */ 1899 
bypass_lut = true; 1900 break; 1901 case DRM_FORMAT_XBGR8888: 1902 case DRM_FORMAT_ABGR8888: 1903 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) | 1904 GRPH_FORMAT(GRPH_FORMAT_ARGB8888)); 1905 fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) | 1906 GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R)); 1907 #ifdef __BIG_ENDIAN 1908 fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32); 1909 #endif 1910 break; 1911 default: 1912 DRM_ERROR("Unsupported screen format %s\n", 1913 drm_get_format_name(target_fb->format->format, &format_name)); 1914 return -EINVAL; 1915 } 1916 1917 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 1918 unsigned bankw, bankh, mtaspect, tile_split, num_banks; 1919 1920 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 1921 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 1922 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 1923 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 1924 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 1925 1926 fb_format |= GRPH_NUM_BANKS(num_banks); 1927 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1); 1928 fb_format |= GRPH_TILE_SPLIT(tile_split); 1929 fb_format |= GRPH_BANK_WIDTH(bankw); 1930 fb_format |= GRPH_BANK_HEIGHT(bankh); 1931 fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect); 1932 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 1933 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1); 1934 } 1935 1936 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1937 fb_format |= GRPH_PIPE_CONFIG(pipe_config); 1938 1939 dce_v6_0_vga_enable(crtc, false); 1940 1941 /* Make sure surface address is updated at vertical blank rather than 1942 * horizontal blank 1943 */ 1944 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0); 1945 1946 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1947 upper_32_bits(fb_location)); 1948 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 1949 upper_32_bits(fb_location)); 1950 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1951 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1952 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 1953 (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 1954 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 1955 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 1956 1957 /* 1958 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT 1959 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 1960 * retain the full precision throughout the pipeline. 1961 */ 1962 WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, 1963 (bypass_lut ? 
GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0), 1964 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK); 1965 1966 if (bypass_lut) 1967 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 1968 1969 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 1970 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 1971 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 1972 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 1973 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 1974 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 1975 1976 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; 1977 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 1978 1979 dce_v6_0_grph_enable(crtc, true); 1980 1981 WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 1982 target_fb->height); 1983 x &= ~3; 1984 y &= ~1; 1985 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 1986 (x << 16) | y); 1987 viewport_w = crtc->mode.hdisplay; 1988 viewport_h = (crtc->mode.vdisplay + 1) & ~1; 1989 1990 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 1991 (viewport_w << 16) | viewport_h); 1992 1993 /* set pageflip to happen anywhere in vblank interval */ 1994 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 1995 1996 if (!atomic && fb && fb != crtc->primary->fb) { 1997 abo = gem_to_amdgpu_bo(fb->obj[0]); 1998 r = amdgpu_bo_reserve(abo, true); 1999 if (unlikely(r != 0)) 2000 return r; 2001 amdgpu_bo_unpin(abo); 2002 amdgpu_bo_unreserve(abo); 2003 } 2004 2005 /* Bytes per pixel may have changed */ 2006 dce_v6_0_bandwidth_update(adev); 2007 2008 return 0; 2009 2010 } 2011 2012 static void dce_v6_0_set_interleave(struct drm_crtc *crtc, 2013 struct drm_display_mode *mode) 2014 { 2015 struct drm_device *dev = crtc->dev; 2016 struct amdgpu_device *adev = dev->dev_private; 2017 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2018 2019 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2020 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 2021 INTERLEAVE_EN); 2022 else 2023 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0); 2024 } 2025 2026 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc) 2027 { 2028 2029 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2030 struct drm_device *dev = crtc->dev; 2031 struct amdgpu_device *adev = dev->dev_private; 2032 u16 *r, *g, *b; 2033 int i; 2034 2035 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2036 2037 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2038 ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) | 2039 (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT))); 2040 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, 2041 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK); 2042 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, 2043 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK); 2044 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2045 ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) | 2046 (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT))); 2047 2048 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2049 2050 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2051 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2052 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2053 2054 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2055 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2056 
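	/*
	 * The upload loop below packs one 10-bit value per channel into each
	 * DC_LUT_30_COLOR write: the top ten bits of the 16-bit gamma_store
	 * entries land in bits 29:20 (red), 19:10 (green) and 9:0 (blue).
	 */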
WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2057 2058 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2059 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2060 2061 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2062 r = crtc->gamma_store; 2063 g = r + crtc->gamma_size; 2064 b = g + crtc->gamma_size; 2065 for (i = 0; i < 256; i++) { 2066 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2067 ((*r++ & 0xffc0) << 14) | 2068 ((*g++ & 0xffc0) << 4) | 2069 (*b++ >> 6)); 2070 } 2071 2072 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2073 ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) | 2074 (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) | 2075 ICON_DEGAMMA_MODE(0) | 2076 (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT))); 2077 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, 2078 ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) | 2079 (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT))); 2080 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, 2081 ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) | 2082 (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT))); 2083 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, 2084 ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) | 2085 (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT))); 2086 /* XXX match this to the depth of the crtc fmt block, move to modeset? */ 2087 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0); 2088 2089 2090 } 2091 2092 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder) 2093 { 2094 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2095 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2096 2097 switch (amdgpu_encoder->encoder_id) { 2098 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2099 return dig->linkb ? 1 : 0; 2100 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2101 return dig->linkb ? 3 : 2; 2102 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2103 return dig->linkb ? 5 : 4; 2104 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2105 return 6; 2106 default: 2107 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2108 return 0; 2109 } 2110 } 2111 2112 /** 2113 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc. 2114 * 2115 * @crtc: drm crtc 2116 * 2117 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2118 * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2119 * monitors a dedicated PPLL must be used. If a particular board has 2120 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2121 * as there is no need to program the PLL itself. If we are not able to 2122 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2123 * avoid messing up an existing monitor. 
 */
static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 pll_in_use;
	int pll;

	if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
		if (adev->clock.dp_extclk)
			/* skip PPLL programming if using ext clock */
			return ATOM_PPLL_INVALID;
		else
			return ATOM_PPLL0;
	} else {
		/* use the same PPLL for all monitors with the same clock */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* PPLL1 and PPLL2 */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;
}

static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint32_t cur_lock;

	cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
	if (lock)
		cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	else
		cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
	WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
}

static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(amdgpu_crtc->cursor_addr));
	WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       lower_32_bits(amdgpu_crtc->cursor_addr));

	WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
		   CUR_CONTROL__CURSOR_EN_MASK |
		   (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
		   (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}

static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
				       int x, int y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int xorigin = 0, yorigin = 0;
	int w = amdgpu_crtc->cursor_width;

	amdgpu_crtc->cursor_x = x;
	amdgpu_crtc->cursor_y = y;

	/* avivo cursors are offset into the total surface */
	x += crtc->x;
	y += crtc->y;
	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

	if (x < 0) {
		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
		x = 0;
	}
	if (y < 0) {
		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
		y = 0;
	}

	WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
	WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
	WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
	       ((w - 1)
<< 16) | (amdgpu_crtc->cursor_height - 1)); 2231 2232 return 0; 2233 } 2234 2235 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc, 2236 int x, int y) 2237 { 2238 int ret; 2239 2240 dce_v6_0_lock_cursor(crtc, true); 2241 ret = dce_v6_0_cursor_move_locked(crtc, x, y); 2242 dce_v6_0_lock_cursor(crtc, false); 2243 2244 return ret; 2245 } 2246 2247 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, 2248 struct drm_file *file_priv, 2249 uint32_t handle, 2250 uint32_t width, 2251 uint32_t height, 2252 int32_t hot_x, 2253 int32_t hot_y) 2254 { 2255 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2256 struct drm_gem_object *obj; 2257 struct amdgpu_bo *aobj; 2258 int ret; 2259 2260 if (!handle) { 2261 /* turn off cursor */ 2262 dce_v6_0_hide_cursor(crtc); 2263 obj = NULL; 2264 goto unpin; 2265 } 2266 2267 if ((width > amdgpu_crtc->max_cursor_width) || 2268 (height > amdgpu_crtc->max_cursor_height)) { 2269 DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2270 return -EINVAL; 2271 } 2272 2273 obj = drm_gem_object_lookup(file_priv, handle); 2274 if (!obj) { 2275 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2276 return -ENOENT; 2277 } 2278 2279 aobj = gem_to_amdgpu_bo(obj); 2280 ret = amdgpu_bo_reserve(aobj, false); 2281 if (ret != 0) { 2282 drm_gem_object_put_unlocked(obj); 2283 return ret; 2284 } 2285 2286 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 2287 amdgpu_bo_unreserve(aobj); 2288 if (ret) { 2289 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2290 drm_gem_object_put_unlocked(obj); 2291 return ret; 2292 } 2293 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 2294 2295 dce_v6_0_lock_cursor(crtc, true); 2296 2297 if (width != amdgpu_crtc->cursor_width || 2298 height != amdgpu_crtc->cursor_height || 2299 hot_x != amdgpu_crtc->cursor_hot_x || 2300 hot_y != amdgpu_crtc->cursor_hot_y) { 2301 int x, y; 2302 2303 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2304 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2305 2306 dce_v6_0_cursor_move_locked(crtc, x, y); 2307 2308 amdgpu_crtc->cursor_width = width; 2309 amdgpu_crtc->cursor_height = height; 2310 amdgpu_crtc->cursor_hot_x = hot_x; 2311 amdgpu_crtc->cursor_hot_y = hot_y; 2312 } 2313 2314 dce_v6_0_show_cursor(crtc); 2315 dce_v6_0_lock_cursor(crtc, false); 2316 2317 unpin: 2318 if (amdgpu_crtc->cursor_bo) { 2319 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2320 ret = amdgpu_bo_reserve(aobj, true); 2321 if (likely(ret == 0)) { 2322 amdgpu_bo_unpin(aobj); 2323 amdgpu_bo_unreserve(aobj); 2324 } 2325 drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); 2326 } 2327 2328 amdgpu_crtc->cursor_bo = obj; 2329 return 0; 2330 } 2331 2332 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc) 2333 { 2334 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2335 2336 if (amdgpu_crtc->cursor_bo) { 2337 dce_v6_0_lock_cursor(crtc, true); 2338 2339 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2340 amdgpu_crtc->cursor_y); 2341 2342 dce_v6_0_show_cursor(crtc); 2343 dce_v6_0_lock_cursor(crtc, false); 2344 } 2345 } 2346 2347 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2348 u16 *blue, uint32_t size, 2349 struct drm_modeset_acquire_ctx *ctx) 2350 { 2351 dce_v6_0_crtc_load_lut(crtc); 2352 2353 return 0; 2354 } 2355 2356 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc) 2357 { 2358 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2359 2360 
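	/* unregister from the DRM core, then free the wrapper allocated in
	 * dce_v6_0_crtc_init()
	 */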
drm_crtc_cleanup(crtc); 2361 kfree(amdgpu_crtc); 2362 } 2363 2364 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = { 2365 .cursor_set2 = dce_v6_0_crtc_cursor_set2, 2366 .cursor_move = dce_v6_0_crtc_cursor_move, 2367 .gamma_set = dce_v6_0_crtc_gamma_set, 2368 .set_config = amdgpu_display_crtc_set_config, 2369 .destroy = dce_v6_0_crtc_destroy, 2370 .page_flip_target = amdgpu_display_crtc_page_flip_target, 2371 }; 2372 2373 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2374 { 2375 struct drm_device *dev = crtc->dev; 2376 struct amdgpu_device *adev = dev->dev_private; 2377 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2378 unsigned type; 2379 2380 switch (mode) { 2381 case DRM_MODE_DPMS_ON: 2382 amdgpu_crtc->enabled = true; 2383 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2384 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2385 /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2386 type = amdgpu_display_crtc_idx_to_irq_type(adev, 2387 amdgpu_crtc->crtc_id); 2388 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2389 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2390 drm_crtc_vblank_on(crtc); 2391 dce_v6_0_crtc_load_lut(crtc); 2392 break; 2393 case DRM_MODE_DPMS_STANDBY: 2394 case DRM_MODE_DPMS_SUSPEND: 2395 case DRM_MODE_DPMS_OFF: 2396 drm_crtc_vblank_off(crtc); 2397 if (amdgpu_crtc->enabled) 2398 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2399 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2400 amdgpu_crtc->enabled = false; 2401 break; 2402 } 2403 /* adjust pm to dpms */ 2404 amdgpu_pm_compute_clocks(adev); 2405 } 2406 2407 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc) 2408 { 2409 /* disable crtc pair power gating before programming */ 2410 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2411 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2412 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2413 } 2414 2415 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc) 2416 { 2417 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2418 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2419 } 2420 2421 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc) 2422 { 2423 2424 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2425 struct drm_device *dev = crtc->dev; 2426 struct amdgpu_device *adev = dev->dev_private; 2427 struct amdgpu_atom_ss ss; 2428 int i; 2429 2430 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2431 if (crtc->primary->fb) { 2432 int r; 2433 struct amdgpu_bo *abo; 2434 2435 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); 2436 r = amdgpu_bo_reserve(abo, true); 2437 if (unlikely(r)) 2438 DRM_ERROR("failed to reserve abo before unpin\n"); 2439 else { 2440 amdgpu_bo_unpin(abo); 2441 amdgpu_bo_unreserve(abo); 2442 } 2443 } 2444 /* disable the GRPH */ 2445 dce_v6_0_grph_enable(crtc, false); 2446 2447 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2448 2449 for (i = 0; i < adev->mode_info.num_crtc; i++) { 2450 if (adev->mode_info.crtcs[i] && 2451 adev->mode_info.crtcs[i]->enabled && 2452 i != amdgpu_crtc->crtc_id && 2453 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2454 /* one other crtc is using this pll don't turn 2455 * off the pll 2456 */ 2457 goto done; 2458 } 2459 } 2460 2461 switch (amdgpu_crtc->pll_id) { 2462 case ATOM_PPLL1: 2463 case ATOM_PPLL2: 2464 /* disable the ppll */ 2465 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2466 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2467 break; 2468 default: 2469 break; 2470 } 2471 done: 2472 amdgpu_crtc->pll_id = 
ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}

static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v6_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}

static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}

static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}

static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}

static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
	.dpms = dce_v6_0_crtc_dpms,
	.mode_fixup = dce_v6_0_crtc_mode_fixup,
	.mode_set = dce_v6_0_crtc_mode_set,
	.mode_set_base = dce_v6_0_crtc_set_base,
	.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
	.prepare = dce_v6_0_crtc_prepare,
	.commit = dce_v6_0_crtc_commit,
	.disable = dce_v6_0_crtc_disable,
};

static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
	struct amdgpu_crtc *amdgpu_crtc;

	amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
			      (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
	if (amdgpu_crtc == NULL)
		return -ENOMEM;

	drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);

	drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
	amdgpu_crtc->crtc_id = index;
	adev->mode_info.crtcs[index] = amdgpu_crtc;

	amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
	amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
	adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
	adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;

	amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);

	return 0;
}

static int dce_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
	adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;

	dce_v6_0_set_display_funcs(adev);

	adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
	default:
		return -EINVAL;
	}

	dce_v6_0_set_irq_funcs(adev);

	return 0;
}

static int dce_v6_0_sw_init(void *handle)
{
	int r, i;
	bool ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
		if (r)
			return r;
	}

	for (i = 8; i < 20; i += 2) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
		if (r)
			return r;
	}

	/* HPD hotplug */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
	if (r)
		return r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
	adev->ddev->mode_config.async_page_flip = true;
	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;
	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = dce_v6_0_crtc_init(adev, i);
		if (r)
			return r;
	}

	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
	if (ret)
		amdgpu_display_print_display_setup(adev->ddev);
	else
		return -EINVAL;

	/* setup afmt */
	r = dce_v6_0_afmt_init(adev);
	if (r)
		return r;

	r = dce_v6_0_audio_init(adev);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev->ddev);

	return r;
}

static int dce_v6_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v6_0_audio_fini(adev);
	dce_v6_0_afmt_fini(adev);

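	/* mode config teardown comes last, mirroring the setup order in sw_init() */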
drm_mode_config_cleanup(adev->ddev); 2701 adev->mode_info.mode_config_initialized = false; 2702 2703 return 0; 2704 } 2705 2706 static int dce_v6_0_hw_init(void *handle) 2707 { 2708 int i; 2709 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2710 2711 /* disable vga render */ 2712 dce_v6_0_set_vga_render_state(adev, false); 2713 /* init dig PHYs, disp eng pll */ 2714 amdgpu_atombios_encoder_init_dig(adev); 2715 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 2716 2717 /* initialize hpd */ 2718 dce_v6_0_hpd_init(adev); 2719 2720 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2721 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2722 } 2723 2724 dce_v6_0_pageflip_interrupt_init(adev); 2725 2726 return 0; 2727 } 2728 2729 static int dce_v6_0_hw_fini(void *handle) 2730 { 2731 int i; 2732 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2733 2734 dce_v6_0_hpd_fini(adev); 2735 2736 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 2737 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2738 } 2739 2740 dce_v6_0_pageflip_interrupt_fini(adev); 2741 2742 return 0; 2743 } 2744 2745 static int dce_v6_0_suspend(void *handle) 2746 { 2747 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2748 2749 adev->mode_info.bl_level = 2750 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); 2751 2752 return dce_v6_0_hw_fini(handle); 2753 } 2754 2755 static int dce_v6_0_resume(void *handle) 2756 { 2757 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2758 int ret; 2759 2760 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, 2761 adev->mode_info.bl_level); 2762 2763 ret = dce_v6_0_hw_init(handle); 2764 2765 /* turn on the BL */ 2766 if (adev->mode_info.bl_encoder) { 2767 u8 bl_level = amdgpu_display_backlight_get_level(adev, 2768 adev->mode_info.bl_encoder); 2769 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 2770 bl_level); 2771 } 2772 2773 return ret; 2774 } 2775 2776 static bool dce_v6_0_is_idle(void *handle) 2777 { 2778 return true; 2779 } 2780 2781 static int dce_v6_0_wait_for_idle(void *handle) 2782 { 2783 return 0; 2784 } 2785 2786 static int dce_v6_0_soft_reset(void *handle) 2787 { 2788 DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n"); 2789 return 0; 2790 } 2791 2792 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 2793 int crtc, 2794 enum amdgpu_interrupt_state state) 2795 { 2796 u32 reg_block, interrupt_mask; 2797 2798 if (crtc >= adev->mode_info.num_crtc) { 2799 DRM_DEBUG("invalid crtc %d\n", crtc); 2800 return; 2801 } 2802 2803 switch (crtc) { 2804 case 0: 2805 reg_block = SI_CRTC0_REGISTER_OFFSET; 2806 break; 2807 case 1: 2808 reg_block = SI_CRTC1_REGISTER_OFFSET; 2809 break; 2810 case 2: 2811 reg_block = SI_CRTC2_REGISTER_OFFSET; 2812 break; 2813 case 3: 2814 reg_block = SI_CRTC3_REGISTER_OFFSET; 2815 break; 2816 case 4: 2817 reg_block = SI_CRTC4_REGISTER_OFFSET; 2818 break; 2819 case 5: 2820 reg_block = SI_CRTC5_REGISTER_OFFSET; 2821 break; 2822 default: 2823 DRM_DEBUG("invalid crtc %d\n", crtc); 2824 return; 2825 } 2826 2827 switch (state) { 2828 case AMDGPU_IRQ_STATE_DISABLE: 2829 interrupt_mask = RREG32(mmINT_MASK + reg_block); 2830 interrupt_mask &= ~VBLANK_INT_MASK; 2831 WREG32(mmINT_MASK + reg_block, interrupt_mask); 2832 break; 2833 case AMDGPU_IRQ_STATE_ENABLE: 2834 interrupt_mask = RREG32(mmINT_MASK + reg_block); 2835 interrupt_mask |= VBLANK_INT_MASK; 2836 WREG32(mmINT_MASK + reg_block, 
		       interrupt_mask);
		break;
	default:
		break;
	}
}

static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
						    int crtc,
						    enum amdgpu_interrupt_state state)
{

}

static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	u32 dc_hpd_int_cntl;

	if (type >= adev->mode_info.num_hpd) {
		DRM_DEBUG("invalid hpd %d\n", type);
		return 0;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
		dc_hpd_int_cntl |= DC_HPDx_INT_EN;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CRTC_IRQ_VBLANK1:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK2:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK3:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK4:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK5:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VBLANK6:
		dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE1:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE2:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE3:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE4:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE5:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
		break;
	case AMDGPU_CRTC_IRQ_VLINE6:
		dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
		break;
	default:
		break;
	}
	return 0;
}

static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	unsigned crtc = entry->src_id - 1;
	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);

	switch (entry->src_data[0]) {
	case 0: /* vblank */
		if (disp_int & interrupt_status_offsets[crtc].vblank)
			WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
		else
			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");

		if (amdgpu_irq_enabled(adev, source, irq_type))
			drm_handle_vblank(adev->ddev, crtc);
		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
		break;
	case 1: /* vline */
		if (disp_int & interrupt_status_offsets[crtc].vline)
			WREG32(mmVLINE_STATUS +
crtc_offsets[crtc], VLINE_ACK); 2952 else 2953 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 2954 2955 DRM_DEBUG("IH: D%d vline\n", crtc + 1); 2956 break; 2957 default: 2958 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 2959 break; 2960 } 2961 2962 return 0; 2963 } 2964 2965 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev, 2966 struct amdgpu_irq_src *src, 2967 unsigned type, 2968 enum amdgpu_interrupt_state state) 2969 { 2970 u32 reg; 2971 2972 if (type >= adev->mode_info.num_crtc) { 2973 DRM_ERROR("invalid pageflip crtc %d\n", type); 2974 return -EINVAL; 2975 } 2976 2977 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 2978 if (state == AMDGPU_IRQ_STATE_DISABLE) 2979 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 2980 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 2981 else 2982 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 2983 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 2984 2985 return 0; 2986 } 2987 2988 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev, 2989 struct amdgpu_irq_src *source, 2990 struct amdgpu_iv_entry *entry) 2991 { 2992 unsigned long flags; 2993 unsigned crtc_id; 2994 struct amdgpu_crtc *amdgpu_crtc; 2995 struct amdgpu_flip_work *works; 2996 2997 crtc_id = (entry->src_id - 8) >> 1; 2998 amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 2999 3000 if (crtc_id >= adev->mode_info.num_crtc) { 3001 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3002 return -EINVAL; 3003 } 3004 3005 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3006 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3007 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3008 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3009 3010 /* IRQ could occur when in initial stage */ 3011 if (amdgpu_crtc == NULL) 3012 return 0; 3013 3014 spin_lock_irqsave(&adev->ddev->event_lock, flags); 3015 works = amdgpu_crtc->pflip_works; 3016 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3017 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3018 "AMDGPU_FLIP_SUBMITTED(%d)\n", 3019 amdgpu_crtc->pflip_status, 3020 AMDGPU_FLIP_SUBMITTED); 3021 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3022 return 0; 3023 } 3024 3025 /* page flip completed. 
clean up */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	amdgpu_crtc->pflip_works = NULL;

	/* wake up userspace */
	if (works->event)
		drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
	schedule_work(&works->unpin_work);

	return 0;
}

static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask, tmp;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
		WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}

static int dce_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}

static int dce_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
	.name = "dce_v6_0",
	.early_init = dce_v6_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v6_0_sw_init,
	.sw_fini = dce_v6_0_sw_fini,
	.hw_init = dce_v6_0_hw_init,
	.hw_fini = dce_v6_0_hw_fini,
	.suspend = dce_v6_0_suspend,
	.resume = dce_v6_0_resume,
	.is_idle = dce_v6_0_is_idle,
	.wait_for_idle = dce_v6_0_wait_for_idle,
	.soft_reset = dce_v6_0_soft_reset,
	.set_clockgating_state = dce_v6_0_set_clockgating_state,
	.set_powergating_state = dce_v6_0_set_powergating_state,
};

static void
dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v6_0_set_interleave(encoder->crtc, mode);

	if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
		dce_v6_0_afmt_enable(encoder, true);
		dce_v6_0_afmt_setmode(encoder, adjusted_mode);
	}
}

static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder =
dce_v6_0_pick_dig_encoder(encoder); 3135 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) 3136 dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3137 } 3138 } 3139 3140 amdgpu_atombios_scratch_regs_lock(adev, true); 3141 3142 if (connector) { 3143 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3144 3145 /* select the clock/data port if it uses a router */ 3146 if (amdgpu_connector->router.cd_valid) 3147 amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3148 3149 /* turn eDP panel on for mode set */ 3150 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3151 amdgpu_atombios_encoder_set_edp_panel_power(connector, 3152 ATOM_TRANSMITTER_ACTION_POWER_ON); 3153 } 3154 3155 /* this is needed for the pll/ss setup to work correctly in some cases */ 3156 amdgpu_atombios_encoder_set_crtc_source(encoder); 3157 /* set up the FMT blocks */ 3158 dce_v6_0_program_fmt(encoder); 3159 } 3160 3161 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder) 3162 { 3163 3164 struct drm_device *dev = encoder->dev; 3165 struct amdgpu_device *adev = dev->dev_private; 3166 3167 /* need to call this here as we need the crtc set up */ 3168 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3169 amdgpu_atombios_scratch_regs_lock(adev, false); 3170 } 3171 3172 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder) 3173 { 3174 3175 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3176 struct amdgpu_encoder_atom_dig *dig; 3177 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); 3178 3179 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3180 3181 if (amdgpu_atombios_encoder_is_digital(encoder)) { 3182 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) 3183 dce_v6_0_afmt_enable(encoder, false); 3184 dig = amdgpu_encoder->enc_priv; 3185 dig->dig_encoder = -1; 3186 } 3187 amdgpu_encoder->active_device = 0; 3188 } 3189 3190 /* these are handled by the primary encoders */ 3191 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder) 3192 { 3193 3194 } 3195 3196 static void dce_v6_0_ext_commit(struct drm_encoder *encoder) 3197 { 3198 3199 } 3200 3201 static void 3202 dce_v6_0_ext_mode_set(struct drm_encoder *encoder, 3203 struct drm_display_mode *mode, 3204 struct drm_display_mode *adjusted_mode) 3205 { 3206 3207 } 3208 3209 static void dce_v6_0_ext_disable(struct drm_encoder *encoder) 3210 { 3211 3212 } 3213 3214 static void 3215 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode) 3216 { 3217 3218 } 3219 3220 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder, 3221 const struct drm_display_mode *mode, 3222 struct drm_display_mode *adjusted_mode) 3223 { 3224 return true; 3225 } 3226 3227 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = { 3228 .dpms = dce_v6_0_ext_dpms, 3229 .mode_fixup = dce_v6_0_ext_mode_fixup, 3230 .prepare = dce_v6_0_ext_prepare, 3231 .mode_set = dce_v6_0_ext_mode_set, 3232 .commit = dce_v6_0_ext_commit, 3233 .disable = dce_v6_0_ext_disable, 3234 /* no detect for TMDS/LVDS yet */ 3235 }; 3236 3237 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = { 3238 .dpms = amdgpu_atombios_encoder_dpms, 3239 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3240 .prepare = dce_v6_0_encoder_prepare, 3241 .mode_set = dce_v6_0_encoder_mode_set, 3242 .commit = dce_v6_0_encoder_commit, 3243 .disable = dce_v6_0_encoder_disable, 3244 .detect = amdgpu_atombios_encoder_dig_detect, 3245 }; 3246 3247 static const struct drm_encoder_helper_funcs 
dce_v6_0_dac_helper_funcs = { 3248 .dpms = amdgpu_atombios_encoder_dpms, 3249 .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3250 .prepare = dce_v6_0_encoder_prepare, 3251 .mode_set = dce_v6_0_encoder_mode_set, 3252 .commit = dce_v6_0_encoder_commit, 3253 .detect = amdgpu_atombios_encoder_dac_detect, 3254 }; 3255 3256 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder) 3257 { 3258 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3259 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3260 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); 3261 kfree(amdgpu_encoder->enc_priv); 3262 drm_encoder_cleanup(encoder); 3263 kfree(amdgpu_encoder); 3264 } 3265 3266 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = { 3267 .destroy = dce_v6_0_encoder_destroy, 3268 }; 3269 3270 static void dce_v6_0_encoder_add(struct amdgpu_device *adev, 3271 uint32_t encoder_enum, 3272 uint32_t supported_device, 3273 u16 caps) 3274 { 3275 struct drm_device *dev = adev->ddev; 3276 struct drm_encoder *encoder; 3277 struct amdgpu_encoder *amdgpu_encoder; 3278 3279 /* see if we already added it */ 3280 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3281 amdgpu_encoder = to_amdgpu_encoder(encoder); 3282 if (amdgpu_encoder->encoder_enum == encoder_enum) { 3283 amdgpu_encoder->devices |= supported_device; 3284 return; 3285 } 3286 3287 } 3288 3289 /* add a new one */ 3290 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); 3291 if (!amdgpu_encoder) 3292 return; 3293 3294 encoder = &amdgpu_encoder->base; 3295 switch (adev->mode_info.num_crtc) { 3296 case 1: 3297 encoder->possible_crtcs = 0x1; 3298 break; 3299 case 2: 3300 default: 3301 encoder->possible_crtcs = 0x3; 3302 break; 3303 case 4: 3304 encoder->possible_crtcs = 0xf; 3305 break; 3306 case 6: 3307 encoder->possible_crtcs = 0x3f; 3308 break; 3309 } 3310 3311 amdgpu_encoder->enc_priv = NULL; 3312 amdgpu_encoder->encoder_enum = encoder_enum; 3313 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 3314 amdgpu_encoder->devices = supported_device; 3315 amdgpu_encoder->rmx_type = RMX_OFF; 3316 amdgpu_encoder->underscan_type = UNDERSCAN_OFF; 3317 amdgpu_encoder->is_ext_encoder = false; 3318 amdgpu_encoder->caps = caps; 3319 3320 switch (amdgpu_encoder->encoder_id) { 3321 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3322 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3323 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3324 DRM_MODE_ENCODER_DAC, NULL); 3325 drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs); 3326 break; 3327 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3328 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 3329 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 3330 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 3331 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 3332 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3333 amdgpu_encoder->rmx_type = RMX_FULL; 3334 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3335 DRM_MODE_ENCODER_LVDS, NULL); 3336 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3337 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3338 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3339 DRM_MODE_ENCODER_DAC, NULL); 3340 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3341 } else { 3342 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3343 DRM_MODE_ENCODER_TMDS, NULL); 3344 amdgpu_encoder->enc_priv = 
amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3345 } 3346 drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs); 3347 break; 3348 case ENCODER_OBJECT_ID_SI170B: 3349 case ENCODER_OBJECT_ID_CH7303: 3350 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: 3351 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: 3352 case ENCODER_OBJECT_ID_TITFP513: 3353 case ENCODER_OBJECT_ID_VT1623: 3354 case ENCODER_OBJECT_ID_HDMI_SI1930: 3355 case ENCODER_OBJECT_ID_TRAVIS: 3356 case ENCODER_OBJECT_ID_NUTMEG: 3357 /* these are handled by the primary encoders */ 3358 amdgpu_encoder->is_ext_encoder = true; 3359 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3360 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3361 DRM_MODE_ENCODER_LVDS, NULL); 3362 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3363 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3364 DRM_MODE_ENCODER_DAC, NULL); 3365 else 3366 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs, 3367 DRM_MODE_ENCODER_TMDS, NULL); 3368 drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs); 3369 break; 3370 } 3371 } 3372 3373 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { 3374 .bandwidth_update = &dce_v6_0_bandwidth_update, 3375 .vblank_get_counter = &dce_v6_0_vblank_get_counter, 3376 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3377 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3378 .hpd_sense = &dce_v6_0_hpd_sense, 3379 .hpd_set_polarity = &dce_v6_0_hpd_set_polarity, 3380 .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg, 3381 .page_flip = &dce_v6_0_page_flip, 3382 .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos, 3383 .add_encoder = &dce_v6_0_encoder_add, 3384 .add_connector = &amdgpu_connector_add, 3385 }; 3386 3387 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) 3388 { 3389 adev->mode_info.funcs = &dce_v6_0_display_funcs; 3390 } 3391 3392 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = { 3393 .set = dce_v6_0_set_crtc_interrupt_state, 3394 .process = dce_v6_0_crtc_irq, 3395 }; 3396 3397 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = { 3398 .set = dce_v6_0_set_pageflip_interrupt_state, 3399 .process = dce_v6_0_pageflip_irq, 3400 }; 3401 3402 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = { 3403 .set = dce_v6_0_set_hpd_interrupt_state, 3404 .process = dce_v6_0_hpd_irq, 3405 }; 3406 3407 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev) 3408 { 3409 if (adev->mode_info.num_crtc > 0) 3410 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; 3411 else 3412 adev->crtc_irq.num_types = 0; 3413 adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs; 3414 3415 adev->pageflip_irq.num_types = adev->mode_info.num_crtc; 3416 adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs; 3417 3418 adev->hpd_irq.num_types = adev->mode_info.num_hpd; 3419 adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs; 3420 } 3421 3422 const struct amdgpu_ip_block_version dce_v6_0_ip_block = 3423 { 3424 .type = AMD_IP_BLOCK_TYPE_DCE, 3425 .major = 6, 3426 .minor = 0, 3427 .rev = 0, 3428 .funcs = &dce_v6_0_ip_funcs, 3429 }; 3430 3431 const struct amdgpu_ip_block_version dce_v6_4_ip_block = 3432 { 3433 .type = AMD_IP_BLOCK_TYPE_DCE, 3434 .major = 6, 3435 .minor = 4, 3436 .rev = 0, 3437 .funcs = &dce_v6_0_ip_funcs, 3438 }; 3439
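/*
 * Usage sketch (illustrative, not part of the upstream file): asic setup code
 * for SI parts is expected to register one of the IP block versions above,
 * e.g.:
 *
 *	amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
 *
 * after which the amd_ip_funcs callbacks above drive sw/hw init, suspend and
 * resume for the display controller.
 */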