/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/inc/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
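
/*
 * High-level flow: DRM atomic requests enter through amdgpu_dm_atomic_check()
 * and amdgpu_dm_atomic_commit() below, are translated into DC stream and
 * plane updates, and hardware events (vblank, pageflip, HPD) are translated
 * back into DRM events by the IRQ handlers further down in this file.
 */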

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);


/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}
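
/*
 * Report the current scanout position for a CRTC. DC hands back vblank
 * start/end and the horizontal/vertical position separately; as the TODO
 * inside notes, they are packed back into the legacy register format
 * (two 16-bit fields per word) that the base driver still expects.
 */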
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
						acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	struct dm_crtc_state *acrtc_state;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
	vrr_active = amdgpu_dm_vrr_active(acrtc_state);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
							amdgpu_crtc->crtc_id);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results now
		 * that it is done after front-porch. This will also deliver
		 * the page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (amdgpu_dm_vrr_active(acrtc_state)) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc_state->stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev->ddev->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc_state->stream,
						&acrtc_state->vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc_state->stream,
						&acrtc_state->vrr_params.adjust);
				spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc) {
		acrtc_state = to_dm_crtc_state(acrtc->base.state);

		DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
				 amdgpu_dm_vrr_active(acrtc_state));

		/* Core vblank handling at start of front-porch is only possible
		 * in non-vrr mode, as only there vblank timestamping will give
		 * valid results while done in front-porch. Otherwise defer it
		 * to dm_vupdate_high_irq after end of front-porch.
		 */
		if (!amdgpu_dm_vrr_active(acrtc_state))
			drm_crtc_handle_vblank(&acrtc->base);

		/* Following stuff must happen at start of vblank, for crc
		 * computation and below-the-range btr support in vrr mode.
		 */
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

		if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
		    acrtc_state->vrr_params.supported &&
		    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
			spin_lock_irqsave(&adev->ddev->event_lock, flags);
			mod_freesync_handle_v_update(
					adev->dm.freesync_module,
					acrtc_state->stream,
					&acrtc_state->vrr_params);

			dc_stream_adjust_vmin_vmax(
					adev->dm.dc,
					acrtc_state->stream,
					&acrtc_state->vrr_params.adjust);
			spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		}
	}
}


/**
 * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Notify DRM's vblank event handler at VSTARTUP
 *
 * Unlike DCE hardware, we trigger the handler at VSTARTUP, at which:
 * * We are close enough to VUPDATE - the point of no return for hw
 * * We are in the fixed portion of variable front porch when vrr is enabled
 * * We are before VUPDATE, where double-buffered vrr registers are swapped
 *
 * It is therefore the correct place to signal vblank, send user flip events,
 * and update VRR.
 */
static void dm_dcn_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct dm_crtc_state *acrtc_state;
	unsigned long flags;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (!acrtc)
		return;

	acrtc_state = to_dm_crtc_state(acrtc->base.state);

	DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
			 amdgpu_dm_vrr_active(acrtc_state));

	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
	drm_crtc_handle_vblank(&acrtc->base);

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (acrtc_state->vrr_params.supported &&
	    acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(
				adev->dm.freesync_module,
				acrtc_state->stream,
				&acrtc_state->vrr_params);

		dc_stream_adjust_vmin_vmax(
				adev->dm.dc,
				acrtc_state->stream,
				&acrtc_state->vrr_params.adjust);
	}

	if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

/*
 * DRM audio component support: the callbacks below let the HDA audio driver
 * query ELD (EDID-Like Data) for a given port/pin and get notified when a
 * display with audio support is plugged or unplugged.
 */
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
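
/*
 * Bring up the DMUB (display microcontroller) service: compute the memory
 * regions the firmware needs, back them with a kernel BO in VRAM, copy in
 * the firmware code/data and VBIOS, then hand the layout to dmub_srv for
 * hardware initialization.
 */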
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const unsigned int psp_header_bytes = 0x100;
	const unsigned int psp_footer_bytes = 0x100;
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info fb_info;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i;
	int r;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					psp_header_bytes - psp_footer_bytes;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			psp_header_bytes;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
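	/*
	 * Instruction/constant data goes into window 0, BSS/data into
	 * window 2, and the VBIOS image into window 3, matching the region
	 * layout computed above.
	 */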
	memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
	       region_params.inst_const_size);
	memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
	       region_params.bss_data_size);
	memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr,
	       adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info.num_fb; ++i)
		hw_params.fb[i] = &fb_info.fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type <= CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

#if defined(CONFIG_DEBUG_FS)
	if (dtn_debugfs_init(adev))
		DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
#endif

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);

	return;
}

static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
		return 0;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		break;

	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	if (!adev->dm.dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	status = dmub_srv_create(adev->dm.dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
		AMDGPU_UCODE_ID_DMCUB;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	if (adev->dm.dmub_fw) {
		release_firmware(adev->dm.dmub_fw);
		adev->dm.dmub_fw = NULL;
	}

	if (adev->dm.fw_dmcu) {
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
	}

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}
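
/*
 * Late init: load the DMCU IRAM with ABM backlight ramping parameters and a
 * linear backlight LUT, clamping the minimum ABM backlight level to 1% of
 * full scale (0x28F), then kick off MST topology detection on all
 * MST-capable connectors.
 */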
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	bool ret = false;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF x 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* todo will enable for navi10 */
	if (adev->asic_type <= CHIP_RAVEN) {
		ret = dmcu_load_iram(dmcu, params);

		if (!ret)
			return -EINVAL;
	}

	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static void emulated_link_detect(struct dc_link *link)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct display_sink_capability sink_caps = { 0 };
	enum dc_edid_status edid_status;
	struct dc_context *dc_ctx = link->ctx;
	struct dc_sink *sink = NULL;
	struct dc_sink *prev_sink = NULL;

	link->type = dc_connection_none;
	prev_sink = link->local_sink;

	if (prev_sink != NULL)
		dc_sink_retain(prev_sink);

	switch (link->connector_signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		break;
	}

	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
		break;
	}

	case SIGNAL_TYPE_DVI_DUAL_LINK: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		break;
	}

	case SIGNAL_TYPE_LVDS: {
		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
		sink_caps.signal = SIGNAL_TYPE_LVDS;
		break;
	}

	case SIGNAL_TYPE_EDP: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_EDP;
		break;
	}

	case SIGNAL_TYPE_DISPLAY_PORT: {
		sink_caps.transaction_type =
			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
		break;
	}

	default:
		DC_ERROR("Invalid connector type! signal:%d\n",
			 link->connector_signal);
signal:%d\n", 1518 link->connector_signal); 1519 return; 1520 } 1521 1522 sink_init_data.link = link; 1523 sink_init_data.sink_signal = sink_caps.signal; 1524 1525 sink = dc_sink_create(&sink_init_data); 1526 if (!sink) { 1527 DC_ERROR("Failed to create sink!\n"); 1528 return; 1529 } 1530 1531 /* dc_sink_create returns a new reference */ 1532 link->local_sink = sink; 1533 1534 edid_status = dm_helpers_read_local_edid( 1535 link->ctx, 1536 link, 1537 sink); 1538 1539 if (edid_status != EDID_OK) 1540 DC_ERROR("Failed to read EDID"); 1541 1542 } 1543 1544 static int dm_resume(void *handle) 1545 { 1546 struct amdgpu_device *adev = handle; 1547 struct drm_device *ddev = adev->ddev; 1548 struct amdgpu_display_manager *dm = &adev->dm; 1549 struct amdgpu_dm_connector *aconnector; 1550 struct drm_connector *connector; 1551 struct drm_connector_list_iter iter; 1552 struct drm_crtc *crtc; 1553 struct drm_crtc_state *new_crtc_state; 1554 struct dm_crtc_state *dm_new_crtc_state; 1555 struct drm_plane *plane; 1556 struct drm_plane_state *new_plane_state; 1557 struct dm_plane_state *dm_new_plane_state; 1558 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 1559 enum dc_connection_type new_connection_type = dc_connection_none; 1560 int i; 1561 1562 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 1563 dc_release_state(dm_state->context); 1564 dm_state->context = dc_create_state(dm->dc); 1565 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 1566 dc_resource_state_construct(dm->dc, dm_state->context); 1567 1568 /* power on hardware */ 1569 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 1570 1571 /* program HPD filter */ 1572 dc_resume(dm->dc); 1573 1574 /* 1575 * early enable HPD Rx IRQ, should be done before set mode as short 1576 * pulse interrupts are used for MST 1577 */ 1578 amdgpu_dm_irq_resume_early(adev); 1579 1580 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 1581 s3_handle_mst(ddev, false); 1582 1583 /* Do detection*/ 1584 drm_connector_list_iter_begin(ddev, &iter); 1585 drm_for_each_connector_iter(connector, &iter) { 1586 aconnector = to_amdgpu_dm_connector(connector); 1587 1588 /* 1589 * this is the case when traversing through already created 1590 * MST connectors, should be skipped 1591 */ 1592 if (aconnector->mst_port) 1593 continue; 1594 1595 mutex_lock(&aconnector->hpd_lock); 1596 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 1597 DRM_ERROR("KMS: Failed to detect connector\n"); 1598 1599 if (aconnector->base.force && new_connection_type == dc_connection_none) 1600 emulated_link_detect(aconnector->dc_link); 1601 else 1602 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 1603 1604 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 1605 aconnector->fake_enable = false; 1606 1607 if (aconnector->dc_sink) 1608 dc_sink_release(aconnector->dc_sink); 1609 aconnector->dc_sink = NULL; 1610 amdgpu_dm_update_connector_after_detect(aconnector); 1611 mutex_unlock(&aconnector->hpd_lock); 1612 } 1613 drm_connector_list_iter_end(&iter); 1614 1615 /* Force mode set in atomic commit */ 1616 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 1617 new_crtc_state->active_changed = true; 1618 1619 /* 1620 * atomic_check is expected to create the dc states. We need to release 1621 * them here, since they were duplicated as part of the suspend 1622 * procedure. 
1623 */ 1624 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 1625 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 1626 if (dm_new_crtc_state->stream) { 1627 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 1628 dc_stream_release(dm_new_crtc_state->stream); 1629 dm_new_crtc_state->stream = NULL; 1630 } 1631 } 1632 1633 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 1634 dm_new_plane_state = to_dm_plane_state(new_plane_state); 1635 if (dm_new_plane_state->dc_state) { 1636 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 1637 dc_plane_state_release(dm_new_plane_state->dc_state); 1638 dm_new_plane_state->dc_state = NULL; 1639 } 1640 } 1641 1642 drm_atomic_helper_resume(ddev, dm->cached_state); 1643 1644 dm->cached_state = NULL; 1645 1646 amdgpu_dm_irq_resume_late(adev); 1647 1648 return 0; 1649 } 1650 1651 /** 1652 * DOC: DM Lifecycle 1653 * 1654 * DM (and consequently DC) is registered in the amdgpu base driver as a IP 1655 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 1656 * the base driver's device list to be initialized and torn down accordingly. 1657 * 1658 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 1659 */ 1660 1661 static const struct amd_ip_funcs amdgpu_dm_funcs = { 1662 .name = "dm", 1663 .early_init = dm_early_init, 1664 .late_init = dm_late_init, 1665 .sw_init = dm_sw_init, 1666 .sw_fini = dm_sw_fini, 1667 .hw_init = dm_hw_init, 1668 .hw_fini = dm_hw_fini, 1669 .suspend = dm_suspend, 1670 .resume = dm_resume, 1671 .is_idle = dm_is_idle, 1672 .wait_for_idle = dm_wait_for_idle, 1673 .check_soft_reset = dm_check_soft_reset, 1674 .soft_reset = dm_soft_reset, 1675 .set_clockgating_state = dm_set_clockgating_state, 1676 .set_powergating_state = dm_set_powergating_state, 1677 }; 1678 1679 const struct amdgpu_ip_block_version dm_ip_block = 1680 { 1681 .type = AMD_IP_BLOCK_TYPE_DCE, 1682 .major = 1, 1683 .minor = 0, 1684 .rev = 0, 1685 .funcs = &amdgpu_dm_funcs, 1686 }; 1687 1688 1689 /** 1690 * DOC: atomic 1691 * 1692 * *WIP* 1693 */ 1694 1695 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 1696 .fb_create = amdgpu_display_user_framebuffer_create, 1697 .output_poll_changed = drm_fb_helper_output_poll_changed, 1698 .atomic_check = amdgpu_dm_atomic_check, 1699 .atomic_commit = amdgpu_dm_atomic_commit, 1700 }; 1701 1702 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 1703 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail 1704 }; 1705 1706 static void 1707 amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) 1708 { 1709 struct drm_connector *connector = &aconnector->base; 1710 struct drm_device *dev = connector->dev; 1711 struct dc_sink *sink; 1712 1713 /* MST handled by drm_mst framework */ 1714 if (aconnector->mst_mgr.mst_state == true) 1715 return; 1716 1717 1718 sink = aconnector->dc_link->local_sink; 1719 if (sink) 1720 dc_sink_retain(sink); 1721 1722 /* 1723 * Edid mgmt connector gets first update only in mode_valid hook and then 1724 * the connector sink is set to either fake or physical sink depends on link status. 1725 * Skip if already done during boot. 
 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
	    && aconnector->dc_em_sink) {

		/*
		 * For S3 resume with a headless display, use the emulated
		 * sink (dc_em_sink) to fake a stream, because connector->sink
		 * is set to NULL on resume.
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * The retain and release below bump up the
				 * sink's refcount because the link no longer
				 * points to it after disconnect; otherwise the
				 * next crtc-to-connector reshuffle by a UMD
				 * would trigger an unwanted dc_sink release.
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
			drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
					    aconnector->edid);
		}
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);

	} else {
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	if (sink)
		dc_sink_release(sink);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct amdgpu_device *adev = dev->dev_private;
#endif

	/*
	 * In case of failure or MST, there is no need to update the connector
	 * status or notify the OS, since MST handles this in its own context.
	 */
	mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->asic_type >= CHIP_RAVEN)
		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	if (aconnector->base.force && new_connection_type == dc_connection_none) {
		emulated_link_detect(aconnector->dc_link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);

	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}

static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;

		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
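
/*
 * Deferred handler for DP HPD_RX ("short pulse") interrupts. In rough terms:
 * let DC parse the HPD_RX IRQ data, re-run sink detection when the downstream
 * port status changed, and service MST sideband messages through
 * dm_handle_hpd_rx_irq() above.
 */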
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
	enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	union hpd_irq_data hpd_irq_data;
	struct amdgpu_device *adev = dev->dev_private;

	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
#endif

	/*
	 * TODO: temporary mutex to protect the HPD interrupt from a GPIO
	 * conflict; once an i2c helper is implemented, this mutex should
	 * be retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);


#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
#else
	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
#endif
			!is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (!dc_link_detect_sink(dc_link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(dc_link);

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
		hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
#endif
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch) {
		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
		mutex_unlock(&aconnector->hpd_lock);
	}
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx).
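			 * Per the DP spec, a long pulse on the HPD line means
			 * connect/disconnect (handled by handle_hpd_irq above),
			 * while a short pulse signals link status or MST
			 * sideband events (handled by handle_hpd_rx_irq).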
*/ 2078 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 2079 int_params.irq_source = dc_link->irq_source_hpd_rx; 2080 2081 amdgpu_dm_irq_register_interrupt(adev, &int_params, 2082 handle_hpd_rx_irq, 2083 (void *) aconnector); 2084 } 2085 } 2086 } 2087 2088 /* Register IRQ sources and initialize IRQ callbacks */ 2089 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 2090 { 2091 struct dc *dc = adev->dm.dc; 2092 struct common_irq_params *c_irq_params; 2093 struct dc_interrupt_params int_params = {0}; 2094 int r; 2095 int i; 2096 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 2097 2098 if (adev->asic_type >= CHIP_VEGA10) 2099 client_id = SOC15_IH_CLIENTID_DCE; 2100 2101 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 2102 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 2103 2104 /* 2105 * Actions of amdgpu_irq_add_id(): 2106 * 1. Register a set() function with base driver. 2107 * Base driver will call set() function to enable/disable an 2108 * interrupt in DC hardware. 2109 * 2. Register amdgpu_dm_irq_handler(). 2110 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 2111 * coming from DC hardware. 2112 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 2113 * for acknowledging and handling. */ 2114 2115 /* Use VBLANK interrupt */ 2116 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 2117 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 2118 if (r) { 2119 DRM_ERROR("Failed to add crtc irq id!\n"); 2120 return r; 2121 } 2122 2123 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 2124 int_params.irq_source = 2125 dc_interrupt_to_irq_source(dc, i, 0); 2126 2127 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 2128 2129 c_irq_params->adev = adev; 2130 c_irq_params->irq_src = int_params.irq_source; 2131 2132 amdgpu_dm_irq_register_interrupt(adev, &int_params, 2133 dm_crtc_high_irq, c_irq_params); 2134 } 2135 2136 /* Use VUPDATE interrupt */ 2137 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 2138 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 2139 if (r) { 2140 DRM_ERROR("Failed to add vupdate irq id!\n"); 2141 return r; 2142 } 2143 2144 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 2145 int_params.irq_source = 2146 dc_interrupt_to_irq_source(dc, i, 0); 2147 2148 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 2149 2150 c_irq_params->adev = adev; 2151 c_irq_params->irq_src = int_params.irq_source; 2152 2153 amdgpu_dm_irq_register_interrupt(adev, &int_params, 2154 dm_vupdate_high_irq, c_irq_params); 2155 } 2156 2157 /* Use GRPH_PFLIP interrupt */ 2158 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 2159 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 2160 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 2161 if (r) { 2162 DRM_ERROR("Failed to add page flip irq id!\n"); 2163 return r; 2164 } 2165 2166 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 2167 int_params.irq_source = 2168 dc_interrupt_to_irq_source(dc, i, 0); 2169 2170 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 2171 2172 c_irq_params->adev = adev; 2173 c_irq_params->irq_src = int_params.irq_source; 2174 2175 amdgpu_dm_irq_register_interrupt(adev, &int_params, 2176 dm_pflip_high_irq, c_irq_params); 2177 2178 } 2179 2180 /* HPD */ 2181 r = amdgpu_irq_add_id(adev, client_id, 2182 
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 2183 if (r) { 2184 DRM_ERROR("Failed to add hpd irq id!\n"); 2185 return r; 2186 } 2187 2188 register_hpd_handlers(adev); 2189 2190 return 0; 2191 } 2192 2193 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 2194 /* Register IRQ sources and initialize IRQ callbacks */ 2195 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 2196 { 2197 struct dc *dc = adev->dm.dc; 2198 struct common_irq_params *c_irq_params; 2199 struct dc_interrupt_params int_params = {0}; 2200 int r; 2201 int i; 2202 2203 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 2204 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 2205 2206 /* 2207 * Actions of amdgpu_irq_add_id(): 2208 * 1. Register a set() function with base driver. 2209 * Base driver will call set() function to enable/disable an 2210 * interrupt in DC hardware. 2211 * 2. Register amdgpu_dm_irq_handler(). 2212 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 2213 * coming from DC hardware. 2214 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 2215 * for acknowledging and handling. 2216 */ 2217 2218 /* Use VSTARTUP interrupt */ 2219 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 2220 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 2221 i++) { 2222 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 2223 2224 if (r) { 2225 DRM_ERROR("Failed to add crtc irq id!\n"); 2226 return r; 2227 } 2228 2229 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 2230 int_params.irq_source = 2231 dc_interrupt_to_irq_source(dc, i, 0); 2232 2233 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 2234 2235 c_irq_params->adev = adev; 2236 c_irq_params->irq_src = int_params.irq_source; 2237 2238 amdgpu_dm_irq_register_interrupt(adev, &int_params, 2239 dm_dcn_crtc_high_irq, c_irq_params); 2240 } 2241 2242 /* Use GRPH_PFLIP interrupt */ 2243 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 2244 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; 2245 i++) { 2246 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 2247 if (r) { 2248 DRM_ERROR("Failed to add page flip irq id!\n"); 2249 return r; 2250 } 2251 2252 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 2253 int_params.irq_source = 2254 dc_interrupt_to_irq_source(dc, i, 0); 2255 2256 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 2257 2258 c_irq_params->adev = adev; 2259 c_irq_params->irq_src = int_params.irq_source; 2260 2261 amdgpu_dm_irq_register_interrupt(adev, &int_params, 2262 dm_pflip_high_irq, c_irq_params); 2263 2264 } 2265 2266 /* HPD */ 2267 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 2268 &adev->hpd_irq); 2269 if (r) { 2270 DRM_ERROR("Failed to add hpd irq id!\n"); 2271 return r; 2272 } 2273 2274 register_hpd_handlers(adev); 2275 2276 return 0; 2277 } 2278 #endif 2279 2280 /* 2281 * Acquires the lock for the atomic state object and returns 2282 * the new atomic state. 2283 * 2284 * This should only be called during atomic check. 
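 *
 * A typical caller looks roughly like this (sketch):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 *	(on success, dm_state->context can be inspected or modified)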
2285 */ 2286 static int dm_atomic_get_state(struct drm_atomic_state *state, 2287 struct dm_atomic_state **dm_state) 2288 { 2289 struct drm_device *dev = state->dev; 2290 struct amdgpu_device *adev = dev->dev_private; 2291 struct amdgpu_display_manager *dm = &adev->dm; 2292 struct drm_private_state *priv_state; 2293 2294 if (*dm_state) 2295 return 0; 2296 2297 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 2298 if (IS_ERR(priv_state)) 2299 return PTR_ERR(priv_state); 2300 2301 *dm_state = to_dm_atomic_state(priv_state); 2302 2303 return 0; 2304 } 2305 2306 struct dm_atomic_state * 2307 dm_atomic_get_new_state(struct drm_atomic_state *state) 2308 { 2309 struct drm_device *dev = state->dev; 2310 struct amdgpu_device *adev = dev->dev_private; 2311 struct amdgpu_display_manager *dm = &adev->dm; 2312 struct drm_private_obj *obj; 2313 struct drm_private_state *new_obj_state; 2314 int i; 2315 2316 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 2317 if (obj->funcs == dm->atomic_obj.funcs) 2318 return to_dm_atomic_state(new_obj_state); 2319 } 2320 2321 return NULL; 2322 } 2323 2324 struct dm_atomic_state * 2325 dm_atomic_get_old_state(struct drm_atomic_state *state) 2326 { 2327 struct drm_device *dev = state->dev; 2328 struct amdgpu_device *adev = dev->dev_private; 2329 struct amdgpu_display_manager *dm = &adev->dm; 2330 struct drm_private_obj *obj; 2331 struct drm_private_state *old_obj_state; 2332 int i; 2333 2334 for_each_old_private_obj_in_state(state, obj, old_obj_state, i) { 2335 if (obj->funcs == dm->atomic_obj.funcs) 2336 return to_dm_atomic_state(old_obj_state); 2337 } 2338 2339 return NULL; 2340 } 2341 2342 static struct drm_private_state * 2343 dm_atomic_duplicate_state(struct drm_private_obj *obj) 2344 { 2345 struct dm_atomic_state *old_state, *new_state; 2346 2347 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 2348 if (!new_state) 2349 return NULL; 2350 2351 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 2352 2353 old_state = to_dm_atomic_state(obj->state); 2354 2355 if (old_state && old_state->context) 2356 new_state->context = dc_copy_state(old_state->context); 2357 2358 if (!new_state->context) { 2359 kfree(new_state); 2360 return NULL; 2361 } 2362 2363 return &new_state->base; 2364 } 2365 2366 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 2367 struct drm_private_state *state) 2368 { 2369 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 2370 2371 if (dm_state && dm_state->context) 2372 dc_release_state(dm_state->context); 2373 2374 kfree(dm_state); 2375 } 2376 2377 static struct drm_private_state_funcs dm_atomic_state_funcs = { 2378 .atomic_duplicate_state = dm_atomic_duplicate_state, 2379 .atomic_destroy_state = dm_atomic_destroy_state, 2380 }; 2381 2382 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 2383 { 2384 struct dm_atomic_state *state; 2385 int r; 2386 2387 adev->mode_info.mode_config_initialized = true; 2388 2389 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 2390 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 2391 2392 adev->ddev->mode_config.max_width = 16384; 2393 adev->ddev->mode_config.max_height = 16384; 2394 2395 adev->ddev->mode_config.preferred_depth = 24; 2396 adev->ddev->mode_config.prefer_shadow = 1; 2397 /* indicates support for immediate flip */ 2398 adev->ddev->mode_config.async_page_flip = true; 2399 2400 adev->ddev->mode_config.fb_base = adev->gmc.aper_base; 2401 2402 state = 
kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->context = dc_create_state(adev->dm.dc);
	if (!state->context) {
		kfree(state);
		return -ENOMEM;
	}

	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);

	drm_atomic_private_obj_init(adev->ddev,
				    &adev->dm.atomic_obj,
				    &state->base,
				    &dm_atomic_state_funcs);

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	r = amdgpu_dm_audio_init(adev);
	if (r)
		return r;

	return 0;
}

#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
	struct amdgpu_dm_backlight_caps caps;

	if (dm->backlight_caps.caps_valid)
		return;

	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
	if (caps.caps_valid) {
		dm->backlight_caps.min_input_signal = caps.min_input_signal;
		dm->backlight_caps.max_input_signal = caps.max_input_signal;
		dm->backlight_caps.caps_valid = true;
	} else {
		dm->backlight_caps.min_input_signal =
				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
		dm->backlight_caps.max_input_signal =
				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
	}
#else
	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	struct amdgpu_dm_backlight_caps caps;
	uint32_t brightness = bd->props.brightness;

	amdgpu_dm_update_backlight_caps(dm);
	caps = dm->backlight_caps;
	/*
	 * The brightness input is in the range 0-255. It needs to be
	 * rescaled to be between the requested min and max input signal,
	 * and it also needs to be scaled up by 0x101 to match the DC
	 * interface, which has a range of 0 to 0xffff.
	 *
	 * For example, with the default 12..255 caps, brightness 0 maps to
	 * 12 * 0x101 = 3084 and brightness 255 maps to 255 * 0x101 = 0xffff.
	 */
	brightness =
		brightness
		* 0x101
		* (caps.max_input_signal - caps.min_input_signal)
		/ AMDGPU_MAX_BL_LEVEL
		+ caps.min_input_signal * 0x101;

	if (dc_link_set_backlight_level(dm->backlight_link,
			brightness, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);
	int ret = dc_link_get_backlight_level(dm->backlight_link);

	if (ret == DC_ERROR_UNEXPECTED)
		return bd->props.brightness;
	return ret;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};

static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	amdgpu_dm_update_backlight_caps(dm);

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
		 dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

static int initialize_plane(struct amdgpu_display_manager *dm,
			    struct amdgpu_mode_info *mode_info, int plane_id,
			    enum drm_plane_type plane_type,
			    const struct dc_plane_cap *plane_cap)
{
	struct drm_plane *plane;
	unsigned long possible_crtcs;
	int ret = 0;

	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
	if (!plane) {
		DRM_ERROR("KMS: Failed to allocate plane\n");
		return -ENOMEM;
	}
	plane->type = plane_type;

	/*
	 * HACK: IGT tests expect that the primary plane for a CRTC
	 * can only have one possible CRTC. Only expose support for
	 * any CRTC if they're not going to be used as a primary plane
	 * for a CRTC - like overlay or underlay planes.
	 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}


static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Even if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}


/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
2633 */ 2634 for (i = (primary_planes - 1); i >= 0; i--) { 2635 plane = &dm->dc->caps.planes[i]; 2636 2637 if (initialize_plane(dm, mode_info, i, 2638 DRM_PLANE_TYPE_PRIMARY, plane)) { 2639 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 2640 goto fail; 2641 } 2642 } 2643 2644 /* 2645 * Initialize overlay planes, index starting after primary planes. 2646 * These planes have a higher DRM index than the primary planes since 2647 * they should be considered as having a higher z-order. 2648 * Order is reversed to match iteration order in atomic check. 2649 * 2650 * Only support DCN for now, and only expose one so we don't encourage 2651 * userspace to use up all the pipes. 2652 */ 2653 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 2654 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 2655 2656 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 2657 continue; 2658 2659 if (!plane->blends_with_above || !plane->blends_with_below) 2660 continue; 2661 2662 if (!plane->pixel_format_support.argb8888) 2663 continue; 2664 2665 if (initialize_plane(dm, NULL, primary_planes + i, 2666 DRM_PLANE_TYPE_OVERLAY, plane)) { 2667 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 2668 goto fail; 2669 } 2670 2671 /* Only create one overlay plane. */ 2672 break; 2673 } 2674 2675 for (i = 0; i < dm->dc->caps.max_streams; i++) 2676 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 2677 DRM_ERROR("KMS: Failed to initialize crtc\n"); 2678 goto fail; 2679 } 2680 2681 dm->display_indexes_num = dm->dc->caps.max_streams; 2682 2683 /* loops over all connectors on the board */ 2684 for (i = 0; i < link_cnt; i++) { 2685 struct dc_link *link = NULL; 2686 2687 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 2688 DRM_ERROR( 2689 "KMS: Cannot support more than %d display indexes\n", 2690 AMDGPU_DM_MAX_DISPLAY_INDEX); 2691 continue; 2692 } 2693 2694 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 2695 if (!aconnector) 2696 goto fail; 2697 2698 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 2699 if (!aencoder) 2700 goto fail; 2701 2702 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 2703 DRM_ERROR("KMS: Failed to initialize encoder\n"); 2704 goto fail; 2705 } 2706 2707 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 2708 DRM_ERROR("KMS: Failed to initialize connector\n"); 2709 goto fail; 2710 } 2711 2712 link = dc_get_link_at_index(dm->dc, i); 2713 2714 if (!dc_link_detect_sink(link, &new_connection_type)) 2715 DRM_ERROR("KMS: Failed to detect connector\n"); 2716 2717 if (aconnector->base.force && new_connection_type == dc_connection_none) { 2718 emulated_link_detect(link); 2719 amdgpu_dm_update_connector_after_detect(aconnector); 2720 2721 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { 2722 amdgpu_dm_update_connector_after_detect(aconnector); 2723 register_backlight_device(dm, link); 2724 if (amdgpu_dc_feature_mask & DC_PSR_MASK) 2725 amdgpu_dm_set_psr_caps(link); 2726 } 2727 2728 2729 } 2730 2731 /* Software is initialized. Now we can register interrupt handlers. 
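	 * Registration differs per ASIC family: DCE parts go through
	 * dce110_register_irq_handlers() below, while DCN parts (Raven
	 * and newer) go through dcn10_register_irq_handlers().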
*/ 2732 switch (adev->asic_type) { 2733 case CHIP_BONAIRE: 2734 case CHIP_HAWAII: 2735 case CHIP_KAVERI: 2736 case CHIP_KABINI: 2737 case CHIP_MULLINS: 2738 case CHIP_TONGA: 2739 case CHIP_FIJI: 2740 case CHIP_CARRIZO: 2741 case CHIP_STONEY: 2742 case CHIP_POLARIS11: 2743 case CHIP_POLARIS10: 2744 case CHIP_POLARIS12: 2745 case CHIP_VEGAM: 2746 case CHIP_VEGA10: 2747 case CHIP_VEGA12: 2748 case CHIP_VEGA20: 2749 if (dce110_register_irq_handlers(dm->adev)) { 2750 DRM_ERROR("DM: Failed to initialize IRQ\n"); 2751 goto fail; 2752 } 2753 break; 2754 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 2755 case CHIP_RAVEN: 2756 case CHIP_NAVI12: 2757 case CHIP_NAVI10: 2758 case CHIP_NAVI14: 2759 #if defined(CONFIG_DRM_AMD_DC_DCN2_1) 2760 case CHIP_RENOIR: 2761 #endif 2762 if (dcn10_register_irq_handlers(dm->adev)) { 2763 DRM_ERROR("DM: Failed to initialize IRQ\n"); 2764 goto fail; 2765 } 2766 break; 2767 #endif 2768 default: 2769 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2770 goto fail; 2771 } 2772 2773 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 2774 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; 2775 2776 return 0; 2777 fail: 2778 kfree(aencoder); 2779 kfree(aconnector); 2780 2781 return -EINVAL; 2782 } 2783 2784 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 2785 { 2786 drm_mode_config_cleanup(dm->ddev); 2787 drm_atomic_private_obj_fini(&dm->atomic_obj); 2788 return; 2789 } 2790 2791 /****************************************************************************** 2792 * amdgpu_display_funcs functions 2793 *****************************************************************************/ 2794 2795 /* 2796 * dm_bandwidth_update - program display watermarks 2797 * 2798 * @adev: amdgpu_device pointer 2799 * 2800 * Calculate and program the display watermarks and line buffer allocation. 2801 */ 2802 static void dm_bandwidth_update(struct amdgpu_device *adev) 2803 { 2804 /* TODO: implement later */ 2805 } 2806 2807 static const struct amdgpu_display_funcs dm_display_funcs = { 2808 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 2809 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 2810 .backlight_set_level = NULL, /* never called for DC */ 2811 .backlight_get_level = NULL, /* never called for DC */ 2812 .hpd_sense = NULL,/* called unconditionally */ 2813 .hpd_set_polarity = NULL, /* called unconditionally */ 2814 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 2815 .page_flip_get_scanoutpos = 2816 dm_crtc_get_scanoutpos,/* called unconditionally */ 2817 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 2818 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 2819 }; 2820 2821 #if defined(CONFIG_DEBUG_KERNEL_DC) 2822 2823 static ssize_t s3_debug_store(struct device *device, 2824 struct device_attribute *attr, 2825 const char *buf, 2826 size_t count) 2827 { 2828 int ret; 2829 int s3_state; 2830 struct drm_device *drm_dev = dev_get_drvdata(device); 2831 struct amdgpu_device *adev = drm_dev->dev_private; 2832 2833 ret = kstrtoint(buf, 0, &s3_state); 2834 2835 if (ret == 0) { 2836 if (s3_state) { 2837 dm_resume(adev); 2838 drm_kms_helper_hotplug_event(adev->ddev); 2839 } else 2840 dm_suspend(adev); 2841 } 2842 2843 return ret == 0 ? 
count : 0; 2844 } 2845 2846 DEVICE_ATTR_WO(s3_debug); 2847 2848 #endif 2849 2850 static int dm_early_init(void *handle) 2851 { 2852 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2853 2854 switch (adev->asic_type) { 2855 case CHIP_BONAIRE: 2856 case CHIP_HAWAII: 2857 adev->mode_info.num_crtc = 6; 2858 adev->mode_info.num_hpd = 6; 2859 adev->mode_info.num_dig = 6; 2860 break; 2861 case CHIP_KAVERI: 2862 adev->mode_info.num_crtc = 4; 2863 adev->mode_info.num_hpd = 6; 2864 adev->mode_info.num_dig = 7; 2865 break; 2866 case CHIP_KABINI: 2867 case CHIP_MULLINS: 2868 adev->mode_info.num_crtc = 2; 2869 adev->mode_info.num_hpd = 6; 2870 adev->mode_info.num_dig = 6; 2871 break; 2872 case CHIP_FIJI: 2873 case CHIP_TONGA: 2874 adev->mode_info.num_crtc = 6; 2875 adev->mode_info.num_hpd = 6; 2876 adev->mode_info.num_dig = 7; 2877 break; 2878 case CHIP_CARRIZO: 2879 adev->mode_info.num_crtc = 3; 2880 adev->mode_info.num_hpd = 6; 2881 adev->mode_info.num_dig = 9; 2882 break; 2883 case CHIP_STONEY: 2884 adev->mode_info.num_crtc = 2; 2885 adev->mode_info.num_hpd = 6; 2886 adev->mode_info.num_dig = 9; 2887 break; 2888 case CHIP_POLARIS11: 2889 case CHIP_POLARIS12: 2890 adev->mode_info.num_crtc = 5; 2891 adev->mode_info.num_hpd = 5; 2892 adev->mode_info.num_dig = 5; 2893 break; 2894 case CHIP_POLARIS10: 2895 case CHIP_VEGAM: 2896 adev->mode_info.num_crtc = 6; 2897 adev->mode_info.num_hpd = 6; 2898 adev->mode_info.num_dig = 6; 2899 break; 2900 case CHIP_VEGA10: 2901 case CHIP_VEGA12: 2902 case CHIP_VEGA20: 2903 adev->mode_info.num_crtc = 6; 2904 adev->mode_info.num_hpd = 6; 2905 adev->mode_info.num_dig = 6; 2906 break; 2907 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 2908 case CHIP_RAVEN: 2909 adev->mode_info.num_crtc = 4; 2910 adev->mode_info.num_hpd = 4; 2911 adev->mode_info.num_dig = 4; 2912 break; 2913 #endif 2914 case CHIP_NAVI10: 2915 case CHIP_NAVI12: 2916 adev->mode_info.num_crtc = 6; 2917 adev->mode_info.num_hpd = 6; 2918 adev->mode_info.num_dig = 6; 2919 break; 2920 case CHIP_NAVI14: 2921 adev->mode_info.num_crtc = 5; 2922 adev->mode_info.num_hpd = 5; 2923 adev->mode_info.num_dig = 5; 2924 break; 2925 #if defined(CONFIG_DRM_AMD_DC_DCN2_1) 2926 case CHIP_RENOIR: 2927 adev->mode_info.num_crtc = 4; 2928 adev->mode_info.num_hpd = 4; 2929 adev->mode_info.num_dig = 4; 2930 break; 2931 #endif 2932 default: 2933 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2934 return -EINVAL; 2935 } 2936 2937 amdgpu_dm_set_irq_funcs(adev); 2938 2939 if (adev->mode_info.funcs == NULL) 2940 adev->mode_info.funcs = &dm_display_funcs; 2941 2942 /* 2943 * Note: Do NOT change adev->audio_endpt_rreg and 2944 * adev->audio_endpt_wreg because they are initialised in 2945 * amdgpu_device_init() 2946 */ 2947 #if defined(CONFIG_DEBUG_KERNEL_DC) 2948 device_create_file( 2949 adev->ddev->dev, 2950 &dev_attr_s3_debug); 2951 #endif 2952 2953 return 0; 2954 } 2955 2956 static bool modeset_required(struct drm_crtc_state *crtc_state, 2957 struct dc_stream_state *new_stream, 2958 struct dc_stream_state *old_stream) 2959 { 2960 if (!drm_atomic_crtc_needs_modeset(crtc_state)) 2961 return false; 2962 2963 if (!crtc_state->enable) 2964 return false; 2965 2966 return crtc_state->active; 2967 } 2968 2969 static bool modereset_required(struct drm_crtc_state *crtc_state) 2970 { 2971 if (!drm_atomic_crtc_needs_modeset(crtc_state)) 2972 return false; 2973 2974 return !crtc_state->enable || !crtc_state->active; 2975 } 2976 2977 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 2978 { 2979 
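	/*
	 * kfree() on the base pointer is assumed safe here because
	 * struct drm_encoder is the first member of the containing
	 * struct amdgpu_encoder allocated in amdgpu_dm_initialize_drm_device().
	 */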
drm_encoder_cleanup(encoder); 2980 kfree(encoder); 2981 } 2982 2983 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 2984 .destroy = amdgpu_dm_encoder_destroy, 2985 }; 2986 2987 2988 static int fill_dc_scaling_info(const struct drm_plane_state *state, 2989 struct dc_scaling_info *scaling_info) 2990 { 2991 int scale_w, scale_h; 2992 2993 memset(scaling_info, 0, sizeof(*scaling_info)); 2994 2995 /* Source is fixed 16.16 but we ignore mantissa for now... */ 2996 scaling_info->src_rect.x = state->src_x >> 16; 2997 scaling_info->src_rect.y = state->src_y >> 16; 2998 2999 scaling_info->src_rect.width = state->src_w >> 16; 3000 if (scaling_info->src_rect.width == 0) 3001 return -EINVAL; 3002 3003 scaling_info->src_rect.height = state->src_h >> 16; 3004 if (scaling_info->src_rect.height == 0) 3005 return -EINVAL; 3006 3007 scaling_info->dst_rect.x = state->crtc_x; 3008 scaling_info->dst_rect.y = state->crtc_y; 3009 3010 if (state->crtc_w == 0) 3011 return -EINVAL; 3012 3013 scaling_info->dst_rect.width = state->crtc_w; 3014 3015 if (state->crtc_h == 0) 3016 return -EINVAL; 3017 3018 scaling_info->dst_rect.height = state->crtc_h; 3019 3020 /* DRM doesn't specify clipping on destination output. */ 3021 scaling_info->clip_rect = scaling_info->dst_rect; 3022 3023 /* TODO: Validate scaling per-format with DC plane caps */ 3024 scale_w = scaling_info->dst_rect.width * 1000 / 3025 scaling_info->src_rect.width; 3026 3027 if (scale_w < 250 || scale_w > 16000) 3028 return -EINVAL; 3029 3030 scale_h = scaling_info->dst_rect.height * 1000 / 3031 scaling_info->src_rect.height; 3032 3033 if (scale_h < 250 || scale_h > 16000) 3034 return -EINVAL; 3035 3036 /* 3037 * The "scaling_quality" can be ignored for now, quality = 0 has DC 3038 * assume reasonable defaults based on the format. 3039 */ 3040 3041 return 0; 3042 } 3043 3044 static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, 3045 uint64_t *tiling_flags) 3046 { 3047 struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]); 3048 int r = amdgpu_bo_reserve(rbo, false); 3049 3050 if (unlikely(r)) { 3051 /* Don't show error message when returning -ERESTARTSYS */ 3052 if (r != -ERESTARTSYS) 3053 DRM_ERROR("Unable to reserve buffer: %d\n", r); 3054 return r; 3055 } 3056 3057 if (tiling_flags) 3058 amdgpu_bo_get_tiling_flags(rbo, tiling_flags); 3059 3060 amdgpu_bo_unreserve(rbo); 3061 3062 return r; 3063 } 3064 3065 static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags) 3066 { 3067 uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B); 3068 3069 return offset ? 
(address + offset * 256) : 0; 3070 } 3071 3072 static int 3073 fill_plane_dcc_attributes(struct amdgpu_device *adev, 3074 const struct amdgpu_framebuffer *afb, 3075 const enum surface_pixel_format format, 3076 const enum dc_rotation_angle rotation, 3077 const struct plane_size *plane_size, 3078 const union dc_tiling_info *tiling_info, 3079 const uint64_t info, 3080 struct dc_plane_dcc_param *dcc, 3081 struct dc_plane_address *address) 3082 { 3083 struct dc *dc = adev->dm.dc; 3084 struct dc_dcc_surface_param input; 3085 struct dc_surface_dcc_cap output; 3086 uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B); 3087 uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0; 3088 uint64_t dcc_address; 3089 3090 memset(&input, 0, sizeof(input)); 3091 memset(&output, 0, sizeof(output)); 3092 3093 if (!offset) 3094 return 0; 3095 3096 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 3097 return 0; 3098 3099 if (!dc->cap_funcs.get_dcc_compression_cap) 3100 return -EINVAL; 3101 3102 input.format = format; 3103 input.surface_size.width = plane_size->surface_size.width; 3104 input.surface_size.height = plane_size->surface_size.height; 3105 input.swizzle_mode = tiling_info->gfx9.swizzle; 3106 3107 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 3108 input.scan = SCAN_DIRECTION_HORIZONTAL; 3109 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 3110 input.scan = SCAN_DIRECTION_VERTICAL; 3111 3112 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 3113 return -EINVAL; 3114 3115 if (!output.capable) 3116 return -EINVAL; 3117 3118 if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0) 3119 return -EINVAL; 3120 3121 dcc->enable = 1; 3122 dcc->meta_pitch = 3123 AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1; 3124 dcc->independent_64b_blks = i64b; 3125 3126 dcc_address = get_dcc_address(afb->address, info); 3127 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 3128 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 3129 3130 return 0; 3131 } 3132 3133 static int 3134 fill_plane_buffer_attributes(struct amdgpu_device *adev, 3135 const struct amdgpu_framebuffer *afb, 3136 const enum surface_pixel_format format, 3137 const enum dc_rotation_angle rotation, 3138 const uint64_t tiling_flags, 3139 union dc_tiling_info *tiling_info, 3140 struct plane_size *plane_size, 3141 struct dc_plane_dcc_param *dcc, 3142 struct dc_plane_address *address) 3143 { 3144 const struct drm_framebuffer *fb = &afb->base; 3145 int ret; 3146 3147 memset(tiling_info, 0, sizeof(*tiling_info)); 3148 memset(plane_size, 0, sizeof(*plane_size)); 3149 memset(dcc, 0, sizeof(*dcc)); 3150 memset(address, 0, sizeof(*address)); 3151 3152 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 3153 plane_size->surface_size.x = 0; 3154 plane_size->surface_size.y = 0; 3155 plane_size->surface_size.width = fb->width; 3156 plane_size->surface_size.height = fb->height; 3157 plane_size->surface_pitch = 3158 fb->pitches[0] / fb->format->cpp[0]; 3159 3160 address->type = PLN_ADDR_TYPE_GRAPHICS; 3161 address->grph.addr.low_part = lower_32_bits(afb->address); 3162 address->grph.addr.high_part = upper_32_bits(afb->address); 3163 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 3164 uint64_t chroma_addr = afb->address + fb->offsets[1]; 3165 3166 plane_size->surface_size.x = 0; 3167 plane_size->surface_size.y = 0; 3168 plane_size->surface_size.width = fb->width; 3169 plane_size->surface_size.height = fb->height; 3170 plane_size->surface_pitch = 3171 fb->pitches[0] / 
fb->format->cpp[0]; 3172 3173 plane_size->chroma_size.x = 0; 3174 plane_size->chroma_size.y = 0; 3175 /* TODO: set these based on surface format */ 3176 plane_size->chroma_size.width = fb->width / 2; 3177 plane_size->chroma_size.height = fb->height / 2; 3178 3179 plane_size->chroma_pitch = 3180 fb->pitches[1] / fb->format->cpp[1]; 3181 3182 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 3183 address->video_progressive.luma_addr.low_part = 3184 lower_32_bits(afb->address); 3185 address->video_progressive.luma_addr.high_part = 3186 upper_32_bits(afb->address); 3187 address->video_progressive.chroma_addr.low_part = 3188 lower_32_bits(chroma_addr); 3189 address->video_progressive.chroma_addr.high_part = 3190 upper_32_bits(chroma_addr); 3191 } 3192 3193 /* Fill GFX8 params */ 3194 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 3195 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 3196 3197 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 3198 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 3199 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 3200 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 3201 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 3202 3203 /* XXX fix me for VI */ 3204 tiling_info->gfx8.num_banks = num_banks; 3205 tiling_info->gfx8.array_mode = 3206 DC_ARRAY_2D_TILED_THIN1; 3207 tiling_info->gfx8.tile_split = tile_split; 3208 tiling_info->gfx8.bank_width = bankw; 3209 tiling_info->gfx8.bank_height = bankh; 3210 tiling_info->gfx8.tile_aspect = mtaspect; 3211 tiling_info->gfx8.tile_mode = 3212 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 3213 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 3214 == DC_ARRAY_1D_TILED_THIN1) { 3215 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 3216 } 3217 3218 tiling_info->gfx8.pipe_config = 3219 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 3220 3221 if (adev->asic_type == CHIP_VEGA10 || 3222 adev->asic_type == CHIP_VEGA12 || 3223 adev->asic_type == CHIP_VEGA20 || 3224 adev->asic_type == CHIP_NAVI10 || 3225 adev->asic_type == CHIP_NAVI14 || 3226 adev->asic_type == CHIP_NAVI12 || 3227 #if defined(CONFIG_DRM_AMD_DC_DCN2_1) 3228 adev->asic_type == CHIP_RENOIR || 3229 #endif 3230 adev->asic_type == CHIP_RAVEN) { 3231 /* Fill GFX9 params */ 3232 tiling_info->gfx9.num_pipes = 3233 adev->gfx.config.gb_addr_config_fields.num_pipes; 3234 tiling_info->gfx9.num_banks = 3235 adev->gfx.config.gb_addr_config_fields.num_banks; 3236 tiling_info->gfx9.pipe_interleave = 3237 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 3238 tiling_info->gfx9.num_shader_engines = 3239 adev->gfx.config.gb_addr_config_fields.num_se; 3240 tiling_info->gfx9.max_compressed_frags = 3241 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 3242 tiling_info->gfx9.num_rb_per_se = 3243 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 3244 tiling_info->gfx9.swizzle = 3245 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); 3246 tiling_info->gfx9.shaderEnable = 1; 3247 3248 ret = fill_plane_dcc_attributes(adev, afb, format, rotation, 3249 plane_size, tiling_info, 3250 tiling_flags, dcc, address); 3251 if (ret) 3252 return ret; 3253 } 3254 3255 return 0; 3256 } 3257 3258 static void 3259 fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 3260 bool *per_pixel_alpha, bool *global_alpha, 3261 int *global_alpha_value) 3262 { 3263 *per_pixel_alpha = false; 3264 *global_alpha = false; 3265 *global_alpha_value = 0xff; 3266 3267 if (plane_state->plane->type != 
DRM_PLANE_TYPE_OVERLAY) 3268 return; 3269 3270 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { 3271 static const uint32_t alpha_formats[] = { 3272 DRM_FORMAT_ARGB8888, 3273 DRM_FORMAT_RGBA8888, 3274 DRM_FORMAT_ABGR8888, 3275 }; 3276 uint32_t format = plane_state->fb->format->format; 3277 unsigned int i; 3278 3279 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 3280 if (format == alpha_formats[i]) { 3281 *per_pixel_alpha = true; 3282 break; 3283 } 3284 } 3285 } 3286 3287 if (plane_state->alpha < 0xffff) { 3288 *global_alpha = true; 3289 *global_alpha_value = plane_state->alpha >> 8; 3290 } 3291 } 3292 3293 static int 3294 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 3295 const enum surface_pixel_format format, 3296 enum dc_color_space *color_space) 3297 { 3298 bool full_range; 3299 3300 *color_space = COLOR_SPACE_SRGB; 3301 3302 /* DRM color properties only affect non-RGB formats. */ 3303 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 3304 return 0; 3305 3306 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 3307 3308 switch (plane_state->color_encoding) { 3309 case DRM_COLOR_YCBCR_BT601: 3310 if (full_range) 3311 *color_space = COLOR_SPACE_YCBCR601; 3312 else 3313 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 3314 break; 3315 3316 case DRM_COLOR_YCBCR_BT709: 3317 if (full_range) 3318 *color_space = COLOR_SPACE_YCBCR709; 3319 else 3320 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 3321 break; 3322 3323 case DRM_COLOR_YCBCR_BT2020: 3324 if (full_range) 3325 *color_space = COLOR_SPACE_2020_YCBCR; 3326 else 3327 return -EINVAL; 3328 break; 3329 3330 default: 3331 return -EINVAL; 3332 } 3333 3334 return 0; 3335 } 3336 3337 static int 3338 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 3339 const struct drm_plane_state *plane_state, 3340 const uint64_t tiling_flags, 3341 struct dc_plane_info *plane_info, 3342 struct dc_plane_address *address) 3343 { 3344 const struct drm_framebuffer *fb = plane_state->fb; 3345 const struct amdgpu_framebuffer *afb = 3346 to_amdgpu_framebuffer(plane_state->fb); 3347 struct drm_format_name_buf format_name; 3348 int ret; 3349 3350 memset(plane_info, 0, sizeof(*plane_info)); 3351 3352 switch (fb->format->format) { 3353 case DRM_FORMAT_C8: 3354 plane_info->format = 3355 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 3356 break; 3357 case DRM_FORMAT_RGB565: 3358 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 3359 break; 3360 case DRM_FORMAT_XRGB8888: 3361 case DRM_FORMAT_ARGB8888: 3362 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 3363 break; 3364 case DRM_FORMAT_XRGB2101010: 3365 case DRM_FORMAT_ARGB2101010: 3366 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 3367 break; 3368 case DRM_FORMAT_XBGR2101010: 3369 case DRM_FORMAT_ABGR2101010: 3370 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 3371 break; 3372 case DRM_FORMAT_XBGR8888: 3373 case DRM_FORMAT_ABGR8888: 3374 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 3375 break; 3376 case DRM_FORMAT_NV21: 3377 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 3378 break; 3379 case DRM_FORMAT_NV12: 3380 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 3381 break; 3382 default: 3383 DRM_ERROR( 3384 "Unsupported screen format %s\n", 3385 drm_get_format_name(fb->format->format, &format_name)); 3386 return -EINVAL; 3387 } 3388 3389 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 3390 case DRM_MODE_ROTATE_0: 3391 plane_info->rotation = ROTATION_ANGLE_0; 3392 break; 3393 case 
DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	const struct amdgpu_framebuffer *amdgpu_fb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	uint64_t tiling_flags;
	int ret;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	ret = get_fb_info(amdgpu_fb, &tiling_flags);
	if (ret)
		return ret;

	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
					  &plane_info,
					  &dc_plane_state->address);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}
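
/*
 * A worked example of the RMX_ASPECT math below (assuming no underscan):
 * a 1280x720 mode on a 1920x1200 stream keeps the aspect ratio by setting
 * dst.height = 720 * 1920 / 1280 = 1080, then centers it with
 * dst.y = (1200 - 1080) / 2 = 60.
 */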
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
			dst.x, dst.y, dst.width, dst.height);

}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state)
{
	uint8_t bpc = (uint8_t)connector->display_info.bpc;

	/* Assume 8 bpc by default if no bpc is specified. */
	bpc = bpc ? bpc : 8;

	if (!state)
		state = connector->state;

	if (state) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}
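
/*
 * For instance, in the function above a 12 bpc panel with
 * max_requested_bpc = 11 yields min(12, 11) = 11, which is then rounded
 * down to 10, i.e. COLOR_DEPTH_101010.
 */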
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * According to the HDMI spec, 27.03 MHz (27030 kHz) is the
		 * separation point between HDTV and SDTV; we use YCbCr709
		 * above it and YCbCr601 below it.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}

	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
{
	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
		return;

	timing_out->display_color_depth--;
}

static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
						  const struct drm_display_info *info)
{
	int normalized_clk;

	if (timing_out->display_color_depth <= COLOR_DEPTH_888)
		return;
	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires an additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
		switch (timing_out->display_color_depth) {
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			return;
		}
		if (normalized_clk <= info->max_tmds_clock)
			return;
		reduce_mode_colour_depth(timing_out);

	} while (timing_out->display_color_depth > COLOR_DEPTH_888);

}
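
/*
 * As an example of the loop above: a 594 MHz 4:4:4 mode at 12 bpc
 * normalizes to 594000 * 36 / 24 = 891000 kHz, which exceeds a
 * 600000 kHz max_tmds_clock; so does 10 bpc (742500 kHz), and the
 * depth settles at 8 bpc.
 */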
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 3715 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 3716 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 3717 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 3718 else 3719 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 3720 3721 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 3722 timing_out->display_color_depth = convert_color_depth_from_display_info( 3723 connector, connector_state); 3724 timing_out->scan_type = SCANNING_TYPE_NODATA; 3725 timing_out->hdmi_vic = 0; 3726 3727 if(old_stream) { 3728 timing_out->vic = old_stream->timing.vic; 3729 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 3730 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 3731 } else { 3732 timing_out->vic = drm_match_cea_mode(mode_in); 3733 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 3734 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 3735 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 3736 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 3737 } 3738 3739 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 3740 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 3741 timing_out->vic = avi_frame.video_code; 3742 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 3743 timing_out->hdmi_vic = hv_frame.vic; 3744 } 3745 3746 timing_out->h_addressable = mode_in->crtc_hdisplay; 3747 timing_out->h_total = mode_in->crtc_htotal; 3748 timing_out->h_sync_width = 3749 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 3750 timing_out->h_front_porch = 3751 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 3752 timing_out->v_total = mode_in->crtc_vtotal; 3753 timing_out->v_addressable = mode_in->crtc_vdisplay; 3754 timing_out->v_front_porch = 3755 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 3756 timing_out->v_sync_width = 3757 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 3758 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 3759 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 3760 3761 stream->output_color_space = get_output_color_space(timing_out); 3762 3763 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 3764 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 3765 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 3766 adjust_colour_depth_from_display_info(timing_out, info); 3767 } 3768 3769 static void fill_audio_info(struct audio_info *audio_info, 3770 const struct drm_connector *drm_connector, 3771 const struct dc_sink *dc_sink) 3772 { 3773 int i = 0; 3774 int cea_revision = 0; 3775 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 3776 3777 audio_info->manufacture_id = edid_caps->manufacturer_id; 3778 audio_info->product_id = edid_caps->product_id; 3779 3780 cea_revision = drm_connector->display_info.cea_rev; 3781 3782 strscpy(audio_info->display_name, 3783 edid_caps->display_name, 3784 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 3785 3786 if (cea_revision >= 3) { 3787 audio_info->mode_count = edid_caps->audio_mode_count; 3788 3789 for (i = 0; i < audio_info->mode_count; ++i) { 3790 audio_info->modes[i].format_code = 3791 (enum audio_format_code) 3792 (edid_caps->audio_modes[i].format_code); 3793 audio_info->modes[i].channel_count = 3794 edid_caps->audio_modes[i].channel_count; 3795 audio_info->modes[i].sample_rates.all = 3796 edid_caps->audio_modes[i].sample_rate; 3797 audio_info->modes[i].sample_size = 3798 
				edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling and no amdgpu-inserted mode; nothing to patch */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			/*
			 * Refresh rate in Hz: pixel clock (in 100 Hz units,
			 * hence the * 100) divided by the total pixels per
			 * frame.
			 */
			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source =
				stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
	struct dsc_dec_dpcd_caps dsc_caps;
	uint32_t link_bandwidth_kbps;
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous
		 * mode, and the mode list may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ?
				(dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw,
				      &dsc_caps);
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

		if (dsc_caps.is_dsc_supported)
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
	if (stream->link->psr_feature_enabled) {
		struct dc *core_dc = stream->link->ctx->dc;

		if (dc_is_dmcu_initialized(core_dc)) {
			struct dmcu *dmcu = core_dc->res_pool->dmcu;

			stream->psr_version = dmcu->dmcu_version.psr_version;
			mod_build_vsc_infopacket(stream,
					&stream->vsc_infopacket,
					&stream->use_vsc_sdp_for_colorimetry);
		}
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;

}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

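	/*
	 * __drm_atomic_helper_crtc_duplicate_state() only copies the DRM core
	 * state; every DM-private field below has to be carried over by hand,
	 * so any new member added to dm_crtc_state should be mirrored here.
	 */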
	state->active_planes = cur->active_planes;
	state->interrupts_enabled = cur->interrupts_enabled;
	state->vrr_params = cur->vrr_params;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate the dc_stream once the stream object is flattened */

	return &state->base;
}

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	int rc;

	/* Do not set vupdate for DCN hardware */
	if (adev->family > AMDGPU_FAMILY_AI)
		return 0;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
			 acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl. Which
	 * makes it a bad place for *any* MST-related activity.
4215 */ 4216 4217 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 4218 !aconnector->fake_enable) 4219 connected = (aconnector->dc_sink != NULL); 4220 else 4221 connected = (aconnector->base.force == DRM_FORCE_ON); 4222 4223 return (connected ? connector_status_connected : 4224 connector_status_disconnected); 4225 } 4226 4227 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 4228 struct drm_connector_state *connector_state, 4229 struct drm_property *property, 4230 uint64_t val) 4231 { 4232 struct drm_device *dev = connector->dev; 4233 struct amdgpu_device *adev = dev->dev_private; 4234 struct dm_connector_state *dm_old_state = 4235 to_dm_connector_state(connector->state); 4236 struct dm_connector_state *dm_new_state = 4237 to_dm_connector_state(connector_state); 4238 4239 int ret = -EINVAL; 4240 4241 if (property == dev->mode_config.scaling_mode_property) { 4242 enum amdgpu_rmx_type rmx_type; 4243 4244 switch (val) { 4245 case DRM_MODE_SCALE_CENTER: 4246 rmx_type = RMX_CENTER; 4247 break; 4248 case DRM_MODE_SCALE_ASPECT: 4249 rmx_type = RMX_ASPECT; 4250 break; 4251 case DRM_MODE_SCALE_FULLSCREEN: 4252 rmx_type = RMX_FULL; 4253 break; 4254 case DRM_MODE_SCALE_NONE: 4255 default: 4256 rmx_type = RMX_OFF; 4257 break; 4258 } 4259 4260 if (dm_old_state->scaling == rmx_type) 4261 return 0; 4262 4263 dm_new_state->scaling = rmx_type; 4264 ret = 0; 4265 } else if (property == adev->mode_info.underscan_hborder_property) { 4266 dm_new_state->underscan_hborder = val; 4267 ret = 0; 4268 } else if (property == adev->mode_info.underscan_vborder_property) { 4269 dm_new_state->underscan_vborder = val; 4270 ret = 0; 4271 } else if (property == adev->mode_info.underscan_property) { 4272 dm_new_state->underscan_enable = val; 4273 ret = 0; 4274 } else if (property == adev->mode_info.abm_level_property) { 4275 dm_new_state->abm_level = val; 4276 ret = 0; 4277 } 4278 4279 return ret; 4280 } 4281 4282 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 4283 const struct drm_connector_state *state, 4284 struct drm_property *property, 4285 uint64_t *val) 4286 { 4287 struct drm_device *dev = connector->dev; 4288 struct amdgpu_device *adev = dev->dev_private; 4289 struct dm_connector_state *dm_state = 4290 to_dm_connector_state(state); 4291 int ret = -EINVAL; 4292 4293 if (property == dev->mode_config.scaling_mode_property) { 4294 switch (dm_state->scaling) { 4295 case RMX_CENTER: 4296 *val = DRM_MODE_SCALE_CENTER; 4297 break; 4298 case RMX_ASPECT: 4299 *val = DRM_MODE_SCALE_ASPECT; 4300 break; 4301 case RMX_FULL: 4302 *val = DRM_MODE_SCALE_FULLSCREEN; 4303 break; 4304 case RMX_OFF: 4305 default: 4306 *val = DRM_MODE_SCALE_NONE; 4307 break; 4308 } 4309 ret = 0; 4310 } else if (property == adev->mode_info.underscan_hborder_property) { 4311 *val = dm_state->underscan_hborder; 4312 ret = 0; 4313 } else if (property == adev->mode_info.underscan_vborder_property) { 4314 *val = dm_state->underscan_vborder; 4315 ret = 0; 4316 } else if (property == adev->mode_info.underscan_property) { 4317 *val = dm_state->underscan_enable; 4318 ret = 0; 4319 } else if (property == adev->mode_info.abm_level_property) { 4320 *val = dm_state->abm_level; 4321 ret = 0; 4322 } 4323 4324 return ret; 4325 } 4326 4327 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 4328 { 4329 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 4330 4331 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 4332 } 4333 4334 static void 
amdgpu_dm_connector_destroy(struct drm_connector *connector) 4335 { 4336 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 4337 const struct dc_link *link = aconnector->dc_link; 4338 struct amdgpu_device *adev = connector->dev->dev_private; 4339 struct amdgpu_display_manager *dm = &adev->dm; 4340 4341 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 4342 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 4343 4344 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && 4345 link->type != dc_connection_none && 4346 dm->backlight_dev) { 4347 backlight_device_unregister(dm->backlight_dev); 4348 dm->backlight_dev = NULL; 4349 } 4350 #endif 4351 4352 if (aconnector->dc_em_sink) 4353 dc_sink_release(aconnector->dc_em_sink); 4354 aconnector->dc_em_sink = NULL; 4355 if (aconnector->dc_sink) 4356 dc_sink_release(aconnector->dc_sink); 4357 aconnector->dc_sink = NULL; 4358 4359 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 4360 drm_connector_unregister(connector); 4361 drm_connector_cleanup(connector); 4362 if (aconnector->i2c) { 4363 i2c_del_adapter(&aconnector->i2c->base); 4364 kfree(aconnector->i2c); 4365 } 4366 4367 kfree(connector); 4368 } 4369 4370 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 4371 { 4372 struct dm_connector_state *state = 4373 to_dm_connector_state(connector->state); 4374 4375 if (connector->state) 4376 __drm_atomic_helper_connector_destroy_state(connector->state); 4377 4378 kfree(state); 4379 4380 state = kzalloc(sizeof(*state), GFP_KERNEL); 4381 4382 if (state) { 4383 state->scaling = RMX_OFF; 4384 state->underscan_enable = false; 4385 state->underscan_hborder = 0; 4386 state->underscan_vborder = 0; 4387 state->base.max_requested_bpc = 8; 4388 state->vcpi_slots = 0; 4389 state->pbn = 0; 4390 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 4391 state->abm_level = amdgpu_dm_abm_level; 4392 4393 __drm_atomic_helper_connector_reset(connector, &state->base); 4394 } 4395 } 4396 4397 struct drm_connector_state * 4398 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 4399 { 4400 struct dm_connector_state *state = 4401 to_dm_connector_state(connector->state); 4402 4403 struct dm_connector_state *new_state = 4404 kmemdup(state, sizeof(*state), GFP_KERNEL); 4405 4406 if (!new_state) 4407 return NULL; 4408 4409 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 4410 4411 new_state->freesync_capable = state->freesync_capable; 4412 new_state->abm_level = state->abm_level; 4413 new_state->scaling = state->scaling; 4414 new_state->underscan_enable = state->underscan_enable; 4415 new_state->underscan_hborder = state->underscan_hborder; 4416 new_state->underscan_vborder = state->underscan_vborder; 4417 new_state->vcpi_slots = state->vcpi_slots; 4418 new_state->pbn = state->pbn; 4419 return &new_state->base; 4420 } 4421 4422 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 4423 .reset = amdgpu_dm_connector_funcs_reset, 4424 .detect = amdgpu_dm_connector_detect, 4425 .fill_modes = drm_helper_probe_single_connector_modes, 4426 .destroy = amdgpu_dm_connector_destroy, 4427 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 4428 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4429 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 4430 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 4431 .early_unregister = amdgpu_dm_connector_unregister 4432 }; 4433 4434 static int 
get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
			  aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID management.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));
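	/*
	 * Roughly, the infoframe built below packs into 30 bytes:
	 *
	 *   buf[0..3]  HDMI infoframe header: type (0x87), version (0x01),
	 *              length (0x1a) and checksum
	 *   buf[4..29] the 26 byte HDR static metadata payload
	 *
	 * The signal-specific header bytes (hb0-hb3) and the payload offset
	 * into sb[] are then rewritten for HDMI or DP SDP transport.
	 */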
4556 if (!state->hdr_output_metadata) 4557 return 0; 4558 4559 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 4560 if (ret) 4561 return ret; 4562 4563 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 4564 if (len < 0) 4565 return (int)len; 4566 4567 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 4568 if (len != 30) 4569 return -EINVAL; 4570 4571 /* Prepare the infopacket for DC. */ 4572 switch (state->connector->connector_type) { 4573 case DRM_MODE_CONNECTOR_HDMIA: 4574 out->hb0 = 0x87; /* type */ 4575 out->hb1 = 0x01; /* version */ 4576 out->hb2 = 0x1A; /* length */ 4577 out->sb[0] = buf[3]; /* checksum */ 4578 i = 1; 4579 break; 4580 4581 case DRM_MODE_CONNECTOR_DisplayPort: 4582 case DRM_MODE_CONNECTOR_eDP: 4583 out->hb0 = 0x00; /* sdp id, zero */ 4584 out->hb1 = 0x87; /* type */ 4585 out->hb2 = 0x1D; /* payload len - 1 */ 4586 out->hb3 = (0x13 << 2); /* sdp version */ 4587 out->sb[0] = 0x01; /* version */ 4588 out->sb[1] = 0x1A; /* length */ 4589 i = 2; 4590 break; 4591 4592 default: 4593 return -EINVAL; 4594 } 4595 4596 memcpy(&out->sb[i], &buf[4], 26); 4597 out->valid = true; 4598 4599 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 4600 sizeof(out->sb), false); 4601 4602 return 0; 4603 } 4604 4605 static bool 4606 is_hdr_metadata_different(const struct drm_connector_state *old_state, 4607 const struct drm_connector_state *new_state) 4608 { 4609 struct drm_property_blob *old_blob = old_state->hdr_output_metadata; 4610 struct drm_property_blob *new_blob = new_state->hdr_output_metadata; 4611 4612 if (old_blob != new_blob) { 4613 if (old_blob && new_blob && 4614 old_blob->length == new_blob->length) 4615 return memcmp(old_blob->data, new_blob->data, 4616 old_blob->length); 4617 4618 return true; 4619 } 4620 4621 return false; 4622 } 4623 4624 static int 4625 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 4626 struct drm_atomic_state *state) 4627 { 4628 struct drm_connector_state *new_con_state = 4629 drm_atomic_get_new_connector_state(state, conn); 4630 struct drm_connector_state *old_con_state = 4631 drm_atomic_get_old_connector_state(state, conn); 4632 struct drm_crtc *crtc = new_con_state->crtc; 4633 struct drm_crtc_state *new_crtc_state; 4634 int ret; 4635 4636 if (!crtc) 4637 return 0; 4638 4639 if (is_hdr_metadata_different(old_con_state, new_con_state)) { 4640 struct dc_info_packet hdr_infopacket; 4641 4642 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 4643 if (ret) 4644 return ret; 4645 4646 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 4647 if (IS_ERR(new_crtc_state)) 4648 return PTR_ERR(new_crtc_state); 4649 4650 /* 4651 * DC considers the stream backends changed if the 4652 * static metadata changes. Forcing the modeset also 4653 * gives a simple way for userspace to switch from 4654 * 8bpc to 10bpc when setting the metadata to enter 4655 * or exit HDR. 4656 * 4657 * Changing the static metadata after it's been 4658 * set is permissible, however. So only force a 4659 * modeset if we're entering or exiting HDR. 4660 */ 4661 new_crtc_state->mode_changed = 4662 !old_con_state->hdr_output_metadata || 4663 !new_con_state->hdr_output_metadata; 4664 } 4665 4666 return 0; 4667 } 4668 4669 static const struct drm_connector_helper_funcs 4670 amdgpu_dm_connector_helper_funcs = { 4671 /* 4672 * If hotplugging a second bigger display in FB Con mode, bigger resolution 4673 * modes will be filtered by drm_mode_validate_size(), and those modes 4674 * are missing after user start lightdm. 
So we need to renew the modes
	 * list in the get_modes callback, not just return the modes count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = new_crtc_state->crtc->dev;
	struct drm_plane *plane;

	drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			return true;
	}

	return false;
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

/*
 * Sets whether interrupts should be enabled on a specific CRTC.
 * We require that the stream be enabled and that there exist active
 * DC planes on the stream.
 */
static void
dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
			       struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;
	dm_new_crtc_state->interrupts_enabled = false;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);

	dm_new_crtc_state->interrupts_enabled =
		dm_new_crtc_state->active_planes > 0;
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = crtc->dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	/*
	 * Update interrupt state for the CRTC. This needs to happen whenever
	 * the CRTC has changed or whenever any of its planes have changed.
	 * Atomic check satisfies both of these requirements since the CRTC
	 * is added to the state by DRM during drm_atomic_helper_check_planes.
	 */
	dm_update_crtc_interrupt_state(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	/*
	 * We want at least one hardware plane enabled to use
	 * the stream with a cursor enabled.
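	 * (The cursor is handled as part of an underlying plane's pipe, so a
	 * cursor with no active hardware planes would have nothing to be
	 * drawn on.)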
4785 */ 4786 if (state->enable && state->active && 4787 does_crtc_have_active_cursor(state) && 4788 dm_crtc_state->active_planes == 0) 4789 return -EINVAL; 4790 4791 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) 4792 return 0; 4793 4794 return ret; 4795 } 4796 4797 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, 4798 const struct drm_display_mode *mode, 4799 struct drm_display_mode *adjusted_mode) 4800 { 4801 return true; 4802 } 4803 4804 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { 4805 .disable = dm_crtc_helper_disable, 4806 .atomic_check = dm_crtc_helper_atomic_check, 4807 .mode_fixup = dm_crtc_helper_mode_fixup 4808 }; 4809 4810 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 4811 { 4812 4813 } 4814 4815 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) 4816 { 4817 switch (display_color_depth) { 4818 case COLOR_DEPTH_666: 4819 return 6; 4820 case COLOR_DEPTH_888: 4821 return 8; 4822 case COLOR_DEPTH_101010: 4823 return 10; 4824 case COLOR_DEPTH_121212: 4825 return 12; 4826 case COLOR_DEPTH_141414: 4827 return 14; 4828 case COLOR_DEPTH_161616: 4829 return 16; 4830 default: 4831 break; 4832 } 4833 return 0; 4834 } 4835 4836 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 4837 struct drm_crtc_state *crtc_state, 4838 struct drm_connector_state *conn_state) 4839 { 4840 struct drm_atomic_state *state = crtc_state->state; 4841 struct drm_connector *connector = conn_state->connector; 4842 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 4843 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 4844 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 4845 struct drm_dp_mst_topology_mgr *mst_mgr; 4846 struct drm_dp_mst_port *mst_port; 4847 enum dc_color_depth color_depth; 4848 int clock, bpp = 0; 4849 4850 if (!aconnector->port || !aconnector->dc_sink) 4851 return 0; 4852 4853 mst_port = aconnector->port; 4854 mst_mgr = &aconnector->mst_port->mst_mgr; 4855 4856 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 4857 return 0; 4858 4859 if (!state->duplicated) { 4860 color_depth = convert_color_depth_from_display_info(connector, conn_state); 4861 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 4862 clock = adjusted_mode->clock; 4863 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp); 4864 } 4865 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, 4866 mst_mgr, 4867 mst_port, 4868 dm_new_connector_state->pbn); 4869 if (dm_new_connector_state->vcpi_slots < 0) { 4870 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 4871 return dm_new_connector_state->vcpi_slots; 4872 } 4873 return 0; 4874 } 4875 4876 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 4877 .disable = dm_encoder_helper_disable, 4878 .atomic_check = dm_encoder_helper_atomic_check 4879 }; 4880 4881 static void dm_drm_plane_reset(struct drm_plane *plane) 4882 { 4883 struct dm_plane_state *amdgpu_state = NULL; 4884 4885 if (plane->state) 4886 plane->funcs->atomic_destroy_state(plane, plane->state); 4887 4888 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 4889 WARN_ON(amdgpu_state == NULL); 4890 4891 if (amdgpu_state) 4892 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 4893 } 4894 4895 static struct drm_plane_state * 4896 dm_drm_plane_duplicate_state(struct drm_plane *plane) 4897 
{ 4898 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 4899 4900 old_dm_plane_state = to_dm_plane_state(plane->state); 4901 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 4902 if (!dm_plane_state) 4903 return NULL; 4904 4905 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 4906 4907 if (old_dm_plane_state->dc_state) { 4908 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 4909 dc_plane_state_retain(dm_plane_state->dc_state); 4910 } 4911 4912 return &dm_plane_state->base; 4913 } 4914 4915 void dm_drm_plane_destroy_state(struct drm_plane *plane, 4916 struct drm_plane_state *state) 4917 { 4918 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 4919 4920 if (dm_plane_state->dc_state) 4921 dc_plane_state_release(dm_plane_state->dc_state); 4922 4923 drm_atomic_helper_plane_destroy_state(plane, state); 4924 } 4925 4926 static const struct drm_plane_funcs dm_plane_funcs = { 4927 .update_plane = drm_atomic_helper_update_plane, 4928 .disable_plane = drm_atomic_helper_disable_plane, 4929 .destroy = drm_primary_helper_destroy, 4930 .reset = dm_drm_plane_reset, 4931 .atomic_duplicate_state = dm_drm_plane_duplicate_state, 4932 .atomic_destroy_state = dm_drm_plane_destroy_state, 4933 }; 4934 4935 static int dm_plane_helper_prepare_fb(struct drm_plane *plane, 4936 struct drm_plane_state *new_state) 4937 { 4938 struct amdgpu_framebuffer *afb; 4939 struct drm_gem_object *obj; 4940 struct amdgpu_device *adev; 4941 struct amdgpu_bo *rbo; 4942 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 4943 struct list_head list; 4944 struct ttm_validate_buffer tv; 4945 struct ww_acquire_ctx ticket; 4946 uint64_t tiling_flags; 4947 uint32_t domain; 4948 int r; 4949 4950 dm_plane_state_old = to_dm_plane_state(plane->state); 4951 dm_plane_state_new = to_dm_plane_state(new_state); 4952 4953 if (!new_state->fb) { 4954 DRM_DEBUG_DRIVER("No FB bound\n"); 4955 return 0; 4956 } 4957 4958 afb = to_amdgpu_framebuffer(new_state->fb); 4959 obj = new_state->fb->obj[0]; 4960 rbo = gem_to_amdgpu_bo(obj); 4961 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 4962 INIT_LIST_HEAD(&list); 4963 4964 tv.bo = &rbo->tbo; 4965 tv.num_shared = 1; 4966 list_add(&tv.head, &list); 4967 4968 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); 4969 if (r) { 4970 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 4971 return r; 4972 } 4973 4974 if (plane->type != DRM_PLANE_TYPE_CURSOR) 4975 domain = amdgpu_display_supported_domains(adev, rbo->flags); 4976 else 4977 domain = AMDGPU_GEM_DOMAIN_VRAM; 4978 4979 r = amdgpu_bo_pin(rbo, domain); 4980 if (unlikely(r != 0)) { 4981 if (r != -ERESTARTSYS) 4982 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 4983 ttm_eu_backoff_reservation(&ticket, &list); 4984 return r; 4985 } 4986 4987 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 4988 if (unlikely(r != 0)) { 4989 amdgpu_bo_unpin(rbo); 4990 ttm_eu_backoff_reservation(&ticket, &list); 4991 DRM_ERROR("%p bind failed\n", rbo); 4992 return r; 4993 } 4994 4995 amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); 4996 4997 ttm_eu_backoff_reservation(&ticket, &list); 4998 4999 afb->address = amdgpu_bo_gpu_offset(rbo); 5000 5001 amdgpu_bo_ref(rbo); 5002 5003 if (dm_plane_state_new->dc_state && 5004 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 5005 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; 5006 5007 fill_plane_buffer_attributes( 5008 adev, afb, plane_state->format, plane_state->rotation, 5009 tiling_flags, &plane_state->tiling_info, 5010 
&plane_state->plane_size, &plane_state->dcc, 5011 &plane_state->address); 5012 } 5013 5014 return 0; 5015 } 5016 5017 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, 5018 struct drm_plane_state *old_state) 5019 { 5020 struct amdgpu_bo *rbo; 5021 int r; 5022 5023 if (!old_state->fb) 5024 return; 5025 5026 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 5027 r = amdgpu_bo_reserve(rbo, false); 5028 if (unlikely(r)) { 5029 DRM_ERROR("failed to reserve rbo before unpin\n"); 5030 return; 5031 } 5032 5033 amdgpu_bo_unpin(rbo); 5034 amdgpu_bo_unreserve(rbo); 5035 amdgpu_bo_unref(&rbo); 5036 } 5037 5038 static int dm_plane_atomic_check(struct drm_plane *plane, 5039 struct drm_plane_state *state) 5040 { 5041 struct amdgpu_device *adev = plane->dev->dev_private; 5042 struct dc *dc = adev->dm.dc; 5043 struct dm_plane_state *dm_plane_state; 5044 struct dc_scaling_info scaling_info; 5045 int ret; 5046 5047 dm_plane_state = to_dm_plane_state(state); 5048 5049 if (!dm_plane_state->dc_state) 5050 return 0; 5051 5052 ret = fill_dc_scaling_info(state, &scaling_info); 5053 if (ret) 5054 return ret; 5055 5056 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 5057 return 0; 5058 5059 return -EINVAL; 5060 } 5061 5062 static int dm_plane_atomic_async_check(struct drm_plane *plane, 5063 struct drm_plane_state *new_plane_state) 5064 { 5065 /* Only support async updates on cursor planes. */ 5066 if (plane->type != DRM_PLANE_TYPE_CURSOR) 5067 return -EINVAL; 5068 5069 return 0; 5070 } 5071 5072 static void dm_plane_atomic_async_update(struct drm_plane *plane, 5073 struct drm_plane_state *new_state) 5074 { 5075 struct drm_plane_state *old_state = 5076 drm_atomic_get_old_plane_state(new_state->state, plane); 5077 5078 swap(plane->state->fb, new_state->fb); 5079 5080 plane->state->src_x = new_state->src_x; 5081 plane->state->src_y = new_state->src_y; 5082 plane->state->src_w = new_state->src_w; 5083 plane->state->src_h = new_state->src_h; 5084 plane->state->crtc_x = new_state->crtc_x; 5085 plane->state->crtc_y = new_state->crtc_y; 5086 plane->state->crtc_w = new_state->crtc_w; 5087 plane->state->crtc_h = new_state->crtc_h; 5088 5089 handle_cursor_update(plane, old_state); 5090 } 5091 5092 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 5093 .prepare_fb = dm_plane_helper_prepare_fb, 5094 .cleanup_fb = dm_plane_helper_cleanup_fb, 5095 .atomic_check = dm_plane_atomic_check, 5096 .atomic_async_check = dm_plane_atomic_async_check, 5097 .atomic_async_update = dm_plane_atomic_async_update 5098 }; 5099 5100 /* 5101 * TODO: these are currently initialized to rgb formats only. 
5102 * For future use cases we should either initialize them dynamically based on 5103 * plane capabilities, or initialize this array to all formats, so internal drm 5104 * check will succeed, and let DC implement proper check 5105 */ 5106 static const uint32_t rgb_formats[] = { 5107 DRM_FORMAT_XRGB8888, 5108 DRM_FORMAT_ARGB8888, 5109 DRM_FORMAT_RGBA8888, 5110 DRM_FORMAT_XRGB2101010, 5111 DRM_FORMAT_XBGR2101010, 5112 DRM_FORMAT_ARGB2101010, 5113 DRM_FORMAT_ABGR2101010, 5114 DRM_FORMAT_XBGR8888, 5115 DRM_FORMAT_ABGR8888, 5116 DRM_FORMAT_RGB565, 5117 }; 5118 5119 static const uint32_t overlay_formats[] = { 5120 DRM_FORMAT_XRGB8888, 5121 DRM_FORMAT_ARGB8888, 5122 DRM_FORMAT_RGBA8888, 5123 DRM_FORMAT_XBGR8888, 5124 DRM_FORMAT_ABGR8888, 5125 DRM_FORMAT_RGB565 5126 }; 5127 5128 static const u32 cursor_formats[] = { 5129 DRM_FORMAT_ARGB8888 5130 }; 5131 5132 static int get_plane_formats(const struct drm_plane *plane, 5133 const struct dc_plane_cap *plane_cap, 5134 uint32_t *formats, int max_formats) 5135 { 5136 int i, num_formats = 0; 5137 5138 /* 5139 * TODO: Query support for each group of formats directly from 5140 * DC plane caps. This will require adding more formats to the 5141 * caps list. 5142 */ 5143 5144 switch (plane->type) { 5145 case DRM_PLANE_TYPE_PRIMARY: 5146 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 5147 if (num_formats >= max_formats) 5148 break; 5149 5150 formats[num_formats++] = rgb_formats[i]; 5151 } 5152 5153 if (plane_cap && plane_cap->pixel_format_support.nv12) 5154 formats[num_formats++] = DRM_FORMAT_NV12; 5155 break; 5156 5157 case DRM_PLANE_TYPE_OVERLAY: 5158 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 5159 if (num_formats >= max_formats) 5160 break; 5161 5162 formats[num_formats++] = overlay_formats[i]; 5163 } 5164 break; 5165 5166 case DRM_PLANE_TYPE_CURSOR: 5167 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 5168 if (num_formats >= max_formats) 5169 break; 5170 5171 formats[num_formats++] = cursor_formats[i]; 5172 } 5173 break; 5174 } 5175 5176 return num_formats; 5177 } 5178 5179 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 5180 struct drm_plane *plane, 5181 unsigned long possible_crtcs, 5182 const struct dc_plane_cap *plane_cap) 5183 { 5184 uint32_t formats[32]; 5185 int num_formats; 5186 int res = -EPERM; 5187 5188 num_formats = get_plane_formats(plane, plane_cap, formats, 5189 ARRAY_SIZE(formats)); 5190 5191 res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs, 5192 &dm_plane_funcs, formats, num_formats, 5193 NULL, plane->type, NULL); 5194 if (res) 5195 return res; 5196 5197 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 5198 plane_cap && plane_cap->per_pixel_alpha) { 5199 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 5200 BIT(DRM_MODE_BLEND_PREMULTI); 5201 5202 drm_plane_create_alpha_property(plane); 5203 drm_plane_create_blend_mode_property(plane, blend_caps); 5204 } 5205 5206 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 5207 plane_cap && plane_cap->pixel_format_support.nv12) { 5208 /* This only affects YUV formats. 
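		 * The encoding/range properties select the YCbCr-to-RGB
		 * conversion matrix (BT.601 vs BT.709) and quantization range
		 * used for formats such as NV12; RGB scanout ignores them.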
		 */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the crtc state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}


static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

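		/*
		 * Only the first probed mode is ever inspected (note the
		 * unconditional break below); the caller sorts the list
		 * first, so it is latched as the native mode only when it
		 * carries DRM_MODE_TYPE_PREFERRED.
		 */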
		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		   (common_modes[i].w == native_mode->hdisplay &&
		    common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since the EDID can have
		 * more than one preferred mode. Modes that are later in
		 * the probed mode list could be of a higher, preferred
		 * resolution: for example, a 3840x2160 resolution in the
		 * base EDID preferred timing, and a 4096x2160 preferred
		 * resolution in a DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc.
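	 * Overriding both fields below keeps the driver default at 8bpc,
	 * while userspace can still raise the "max bpc" property up to the
	 * 16 advertised above.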
*/ 5544 aconnector->base.state->max_bpc = 8; 5545 aconnector->base.state->max_requested_bpc = 8; 5546 5547 if (connector_type == DRM_MODE_CONNECTOR_eDP && 5548 dc_is_dmcu_initialized(adev->dm.dc)) { 5549 drm_object_attach_property(&aconnector->base.base, 5550 adev->mode_info.abm_level_property, 0); 5551 } 5552 5553 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 5554 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5555 connector_type == DRM_MODE_CONNECTOR_eDP) { 5556 drm_object_attach_property( 5557 &aconnector->base.base, 5558 dm->ddev->mode_config.hdr_output_metadata_property, 0); 5559 5560 drm_connector_attach_vrr_capable_property( 5561 &aconnector->base); 5562 #ifdef CONFIG_DRM_AMD_DC_HDCP 5563 if (adev->asic_type >= CHIP_RAVEN) 5564 drm_connector_attach_content_protection_property(&aconnector->base, false); 5565 #endif 5566 } 5567 } 5568 5569 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 5570 struct i2c_msg *msgs, int num) 5571 { 5572 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 5573 struct ddc_service *ddc_service = i2c->ddc_service; 5574 struct i2c_command cmd; 5575 int i; 5576 int result = -EIO; 5577 5578 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 5579 5580 if (!cmd.payloads) 5581 return result; 5582 5583 cmd.number_of_payloads = num; 5584 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 5585 cmd.speed = 100; 5586 5587 for (i = 0; i < num; i++) { 5588 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 5589 cmd.payloads[i].address = msgs[i].addr; 5590 cmd.payloads[i].length = msgs[i].len; 5591 cmd.payloads[i].data = msgs[i].buf; 5592 } 5593 5594 if (dc_submit_i2c( 5595 ddc_service->ctx->dc, 5596 ddc_service->ddc_pin->hw_info.ddc_channel, 5597 &cmd)) 5598 result = num; 5599 5600 kfree(cmd.payloads); 5601 return result; 5602 } 5603 5604 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 5605 { 5606 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 5607 } 5608 5609 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 5610 .master_xfer = amdgpu_dm_i2c_xfer, 5611 .functionality = amdgpu_dm_i2c_func, 5612 }; 5613 5614 static struct amdgpu_i2c_adapter * 5615 create_i2c(struct ddc_service *ddc_service, 5616 int link_index, 5617 int *res) 5618 { 5619 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 5620 struct amdgpu_i2c_adapter *i2c; 5621 5622 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 5623 if (!i2c) 5624 return NULL; 5625 i2c->base.owner = THIS_MODULE; 5626 i2c->base.class = I2C_CLASS_DDC; 5627 i2c->base.dev.parent = &adev->pdev->dev; 5628 i2c->base.algo = &amdgpu_dm_i2c_algo; 5629 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 5630 i2c_set_adapdata(&i2c->base, i2c); 5631 i2c->ddc_service = ddc_service; 5632 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; 5633 5634 return i2c; 5635 } 5636 5637 5638 /* 5639 * Note: this function assumes that dc_link_detect() was called for the 5640 * dc_link which will be represented by this aconnector. 
5641 */ 5642 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 5643 struct amdgpu_dm_connector *aconnector, 5644 uint32_t link_index, 5645 struct amdgpu_encoder *aencoder) 5646 { 5647 int res = 0; 5648 int connector_type; 5649 struct dc *dc = dm->dc; 5650 struct dc_link *link = dc_get_link_at_index(dc, link_index); 5651 struct amdgpu_i2c_adapter *i2c; 5652 5653 link->priv = aconnector; 5654 5655 DRM_DEBUG_DRIVER("%s()\n", __func__); 5656 5657 i2c = create_i2c(link->ddc, link->link_index, &res); 5658 if (!i2c) { 5659 DRM_ERROR("Failed to create i2c adapter data\n"); 5660 return -ENOMEM; 5661 } 5662 5663 aconnector->i2c = i2c; 5664 res = i2c_add_adapter(&i2c->base); 5665 5666 if (res) { 5667 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 5668 goto out_free; 5669 } 5670 5671 connector_type = to_drm_connector_type(link->connector_signal); 5672 5673 res = drm_connector_init( 5674 dm->ddev, 5675 &aconnector->base, 5676 &amdgpu_dm_connector_funcs, 5677 connector_type); 5678 5679 if (res) { 5680 DRM_ERROR("connector_init failed\n"); 5681 aconnector->connector_id = -1; 5682 goto out_free; 5683 } 5684 5685 drm_connector_helper_add( 5686 &aconnector->base, 5687 &amdgpu_dm_connector_helper_funcs); 5688 5689 amdgpu_dm_connector_init_helper( 5690 dm, 5691 aconnector, 5692 connector_type, 5693 link, 5694 link_index); 5695 5696 drm_connector_attach_encoder( 5697 &aconnector->base, &aencoder->base); 5698 5699 drm_connector_register(&aconnector->base); 5700 #if defined(CONFIG_DEBUG_FS) 5701 connector_debugfs_init(aconnector); 5702 aconnector->debugfs_dpcd_address = 0; 5703 aconnector->debugfs_dpcd_size = 0; 5704 #endif 5705 5706 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 5707 || connector_type == DRM_MODE_CONNECTOR_eDP) 5708 amdgpu_dm_initialize_dp_connector(dm, aconnector); 5709 5710 out_free: 5711 if (res) { 5712 kfree(i2c); 5713 aconnector->i2c = NULL; 5714 } 5715 return res; 5716 } 5717 5718 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 5719 { 5720 switch (adev->mode_info.num_crtc) { 5721 case 1: 5722 return 0x1; 5723 case 2: 5724 return 0x3; 5725 case 3: 5726 return 0x7; 5727 case 4: 5728 return 0xf; 5729 case 5: 5730 return 0x1f; 5731 case 6: 5732 default: 5733 return 0x3f; 5734 } 5735 } 5736 5737 static int amdgpu_dm_encoder_init(struct drm_device *dev, 5738 struct amdgpu_encoder *aencoder, 5739 uint32_t link_index) 5740 { 5741 struct amdgpu_device *adev = dev->dev_private; 5742 5743 int res = drm_encoder_init(dev, 5744 &aencoder->base, 5745 &amdgpu_dm_encoder_funcs, 5746 DRM_MODE_ENCODER_TMDS, 5747 NULL); 5748 5749 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 5750 5751 if (!res) 5752 aencoder->encoder_id = link_index; 5753 else 5754 aencoder->encoder_id = -1; 5755 5756 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 5757 5758 return res; 5759 } 5760 5761 static void manage_dm_interrupts(struct amdgpu_device *adev, 5762 struct amdgpu_crtc *acrtc, 5763 bool enable) 5764 { 5765 /* 5766 * this is not correct translation but will work as soon as VBLANK 5767 * constant is the same as PFLIP 5768 */ 5769 int irq_type = 5770 amdgpu_display_crtc_idx_to_irq_type( 5771 adev, 5772 acrtc->crtc_id); 5773 5774 if (enable) { 5775 drm_crtc_vblank_on(&acrtc->base); 5776 amdgpu_irq_get( 5777 adev, 5778 &adev->pageflip_irq, 5779 irq_type); 5780 } else { 5781 5782 amdgpu_irq_put( 5783 adev, 5784 &adev->pageflip_irq, 5785 irq_type); 5786 drm_crtc_vblank_off(&acrtc->base); 5787 } 5788 } 5789 5790 static bool 
5791 is_scaling_state_different(const struct dm_connector_state *dm_state, 5792 const struct dm_connector_state *old_dm_state) 5793 { 5794 if (dm_state->scaling != old_dm_state->scaling) 5795 return true; 5796 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 5797 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 5798 return true; 5799 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 5800 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 5801 return true; 5802 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 5803 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 5804 return true; 5805 return false; 5806 } 5807 5808 #ifdef CONFIG_DRM_AMD_DC_HDCP 5809 static bool is_content_protection_different(struct drm_connector_state *state, 5810 const struct drm_connector_state *old_state, 5811 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) 5812 { 5813 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5814 5815 /* CP is being re-enabled, ignore this. */ 5816 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 5817 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 5818 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 5819 return false; 5820 } 5821 5822 /* S3 resume case, since the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED. */ 5823 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 5824 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 5825 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 5826 5827 /* Check that something is connected and enabled; otherwise we would start HDCP 5828 * with nothing connected or enabled (hot-plug, headless S3, DPMS cases). 5829 */ 5830 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && 5831 aconnector->dc_sink != NULL) 5832 return true; 5833 5834 if (old_state->content_protection == state->content_protection) 5835 return false; 5836 5837 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 5838 return true; 5839 5840 return false; 5841 } 5842 5843 static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, 5844 struct hdcp_workqueue *hdcp_w) 5845 { 5846 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5847 5848 if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 5849 hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); 5850 else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 5851 hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); 5852 5853 } 5854 #endif 5855 static void remove_stream(struct amdgpu_device *adev, 5856 struct amdgpu_crtc *acrtc, 5857 struct dc_stream_state *stream) 5858 { 5859 /* this is the update mode case */ 5860 5861 acrtc->otg_inst = -1; 5862 acrtc->enabled = false; 5863 } 5864 5865 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 5866 struct dc_cursor_position *position) 5867 { 5868 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 5869 int x, y; 5870 int xorigin = 0, yorigin = 0; 5871 5872 position->enable = false; 5873 position->x = 0; 5874 position->y = 0; 5875 5876 if (!crtc || !plane->state->fb) 5877 return 0; 5878 5879 if ((plane->state->crtc_w > 
amdgpu_crtc->max_cursor_width) || 5880 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 5881 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 5882 __func__, 5883 plane->state->crtc_w, 5884 plane->state->crtc_h); 5885 return -EINVAL; 5886 } 5887 5888 x = plane->state->crtc_x; 5889 y = plane->state->crtc_y; 5890 5891 if (x <= -amdgpu_crtc->max_cursor_width || 5892 y <= -amdgpu_crtc->max_cursor_height) 5893 return 0; 5894 5895 if (crtc->primary->state) { 5896 /* avivo cursor are offset into the total surface */ 5897 x += crtc->primary->state->src_x >> 16; 5898 y += crtc->primary->state->src_y >> 16; 5899 } 5900 5901 if (x < 0) { 5902 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 5903 x = 0; 5904 } 5905 if (y < 0) { 5906 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 5907 y = 0; 5908 } 5909 position->enable = true; 5910 position->x = x; 5911 position->y = y; 5912 position->x_hotspot = xorigin; 5913 position->y_hotspot = yorigin; 5914 5915 return 0; 5916 } 5917 5918 static void handle_cursor_update(struct drm_plane *plane, 5919 struct drm_plane_state *old_plane_state) 5920 { 5921 struct amdgpu_device *adev = plane->dev->dev_private; 5922 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 5923 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 5924 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 5925 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 5926 uint64_t address = afb ? afb->address : 0; 5927 struct dc_cursor_position position; 5928 struct dc_cursor_attributes attributes; 5929 int ret; 5930 5931 if (!plane->state->fb && !old_plane_state->fb) 5932 return; 5933 5934 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n", 5935 __func__, 5936 amdgpu_crtc->crtc_id, 5937 plane->state->crtc_w, 5938 plane->state->crtc_h); 5939 5940 ret = get_cursor_position(plane, crtc, &position); 5941 if (ret) 5942 return; 5943 5944 if (!position.enable) { 5945 /* turn off cursor */ 5946 if (crtc_state && crtc_state->stream) { 5947 mutex_lock(&adev->dm.dc_lock); 5948 dc_stream_set_cursor_position(crtc_state->stream, 5949 &position); 5950 mutex_unlock(&adev->dm.dc_lock); 5951 } 5952 return; 5953 } 5954 5955 amdgpu_crtc->cursor_width = plane->state->crtc_w; 5956 amdgpu_crtc->cursor_height = plane->state->crtc_h; 5957 5958 memset(&attributes, 0, sizeof(attributes)); 5959 attributes.address.high_part = upper_32_bits(address); 5960 attributes.address.low_part = lower_32_bits(address); 5961 attributes.width = plane->state->crtc_w; 5962 attributes.height = plane->state->crtc_h; 5963 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 5964 attributes.rotation_angle = 0; 5965 attributes.attribute_flags.value = 0; 5966 5967 attributes.pitch = attributes.width; 5968 5969 if (crtc_state->stream) { 5970 mutex_lock(&adev->dm.dc_lock); 5971 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 5972 &attributes)) 5973 DRM_ERROR("DC failed to set cursor attributes\n"); 5974 5975 if (!dc_stream_set_cursor_position(crtc_state->stream, 5976 &position)) 5977 DRM_ERROR("DC failed to set cursor position\n"); 5978 mutex_unlock(&adev->dm.dc_lock); 5979 } 5980 } 5981 5982 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 5983 { 5984 5985 assert_spin_locked(&acrtc->base.dev->event_lock); 5986 WARN_ON(acrtc->event); 5987 5988 acrtc->event = acrtc->base.state->event; 5989 5990 /* Set the flip status */ 5991 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 5992 5993 /* Mark this event as consumed */ 5994 
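/* Clearing base.state->event below keeps the DRM helpers from sending the event a second time; the pageflip interrupt path is expected to deliver it from acrtc->event once the flip completes. */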
acrtc->base.state->event = NULL; 5995 5996 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 5997 acrtc->crtc_id); 5998 } 5999 6000 static void update_freesync_state_on_stream( 6001 struct amdgpu_display_manager *dm, 6002 struct dm_crtc_state *new_crtc_state, 6003 struct dc_stream_state *new_stream, 6004 struct dc_plane_state *surface, 6005 u32 flip_timestamp_in_us) 6006 { 6007 struct mod_vrr_params vrr_params; 6008 struct dc_info_packet vrr_infopacket = {0}; 6009 struct amdgpu_device *adev = dm->adev; 6010 unsigned long flags; 6011 6012 if (!new_stream) 6013 return; 6014 6015 /* 6016 * TODO: Determine why min/max totals and vrefresh can be 0 here. 6017 * For now it's sufficient to just guard against these conditions. 6018 */ 6019 6020 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 6021 return; 6022 6023 spin_lock_irqsave(&adev->ddev->event_lock, flags); 6024 vrr_params = new_crtc_state->vrr_params; 6025 6026 if (surface) { 6027 mod_freesync_handle_preflip( 6028 dm->freesync_module, 6029 surface, 6030 new_stream, 6031 flip_timestamp_in_us, 6032 &vrr_params); 6033 6034 if (adev->family < AMDGPU_FAMILY_AI && 6035 amdgpu_dm_vrr_active(new_crtc_state)) { 6036 mod_freesync_handle_v_update(dm->freesync_module, 6037 new_stream, &vrr_params); 6038 6039 /* Need to call this before the frame ends. */ 6040 dc_stream_adjust_vmin_vmax(dm->dc, 6041 new_crtc_state->stream, 6042 &vrr_params.adjust); 6043 } 6044 } 6045 6046 mod_freesync_build_vrr_infopacket( 6047 dm->freesync_module, 6048 new_stream, 6049 &vrr_params, 6050 PACKET_TYPE_VRR, 6051 TRANSFER_FUNC_UNKNOWN, 6052 &vrr_infopacket); 6053 6054 new_crtc_state->freesync_timing_changed |= 6055 (memcmp(&new_crtc_state->vrr_params.adjust, 6056 &vrr_params.adjust, 6057 sizeof(vrr_params.adjust)) != 0); 6058 6059 new_crtc_state->freesync_vrr_info_changed |= 6060 (memcmp(&new_crtc_state->vrr_infopacket, 6061 &vrr_infopacket, 6062 sizeof(vrr_infopacket)) != 0); 6063 6064 new_crtc_state->vrr_params = vrr_params; 6065 new_crtc_state->vrr_infopacket = vrr_infopacket; 6066 6067 new_stream->adjust = new_crtc_state->vrr_params.adjust; 6068 new_stream->vrr_infopacket = vrr_infopacket; 6069 6070 if (new_crtc_state->freesync_vrr_info_changed) 6071 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 6072 new_crtc_state->base.crtc->base.id, 6073 (int)new_crtc_state->base.vrr_enabled, 6074 (int)vrr_params.state); 6075 6076 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 6077 } 6078 6079 static void pre_update_freesync_state_on_stream( 6080 struct amdgpu_display_manager *dm, 6081 struct dm_crtc_state *new_crtc_state) 6082 { 6083 struct dc_stream_state *new_stream = new_crtc_state->stream; 6084 struct mod_vrr_params vrr_params; 6085 struct mod_freesync_config config = new_crtc_state->freesync_config; 6086 struct amdgpu_device *adev = dm->adev; 6087 unsigned long flags; 6088 6089 if (!new_stream) 6090 return; 6091 6092 /* 6093 * TODO: Determine why min/max totals and vrefresh can be 0 here. 6094 * For now it's sufficient to just guard against these conditions. 6095 */ 6096 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 6097 return; 6098 6099 spin_lock_irqsave(&adev->ddev->event_lock, flags); 6100 vrr_params = new_crtc_state->vrr_params; 6101 6102 if (new_crtc_state->vrr_supported && 6103 config.min_refresh_in_uhz && 6104 config.max_refresh_in_uhz) { 6105 config.state = new_crtc_state->base.vrr_enabled ? 
6106 VRR_STATE_ACTIVE_VARIABLE : 6107 VRR_STATE_INACTIVE; 6108 } else { 6109 config.state = VRR_STATE_UNSUPPORTED; 6110 } 6111 6112 mod_freesync_build_vrr_params(dm->freesync_module, 6113 new_stream, 6114 &config, &vrr_params); 6115 6116 new_crtc_state->freesync_timing_changed |= 6117 (memcmp(&new_crtc_state->vrr_params.adjust, 6118 &vrr_params.adjust, 6119 sizeof(vrr_params.adjust)) != 0); 6120 6121 new_crtc_state->vrr_params = vrr_params; 6122 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 6123 } 6124 6125 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 6126 struct dm_crtc_state *new_state) 6127 { 6128 bool old_vrr_active = amdgpu_dm_vrr_active(old_state); 6129 bool new_vrr_active = amdgpu_dm_vrr_active(new_state); 6130 6131 if (!old_vrr_active && new_vrr_active) { 6132 /* Transition VRR inactive -> active: 6133 * While VRR is active, we must not disable vblank irq, as a 6134 * reenable after disable would compute bogus vblank/pflip 6135 * timestamps if it likely happened inside display front-porch. 6136 * 6137 * We also need vupdate irq for the actual core vblank handling 6138 * at end of vblank. 6139 */ 6140 dm_set_vupdate_irq(new_state->base.crtc, true); 6141 drm_crtc_vblank_get(new_state->base.crtc); 6142 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 6143 __func__, new_state->base.crtc->base.id); 6144 } else if (old_vrr_active && !new_vrr_active) { 6145 /* Transition VRR active -> inactive: 6146 * Allow vblank irq disable again for fixed refresh rate. 6147 */ 6148 dm_set_vupdate_irq(new_state->base.crtc, false); 6149 drm_crtc_vblank_put(new_state->base.crtc); 6150 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 6151 __func__, new_state->base.crtc->base.id); 6152 } 6153 } 6154 6155 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 6156 { 6157 struct drm_plane *plane; 6158 struct drm_plane_state *old_plane_state, *new_plane_state; 6159 int i; 6160 6161 /* 6162 * TODO: Make this per-stream so we don't issue redundant updates for 6163 * commits with multiple streams. 
6164 */ 6165 for_each_oldnew_plane_in_state(state, plane, old_plane_state, 6166 new_plane_state, i) 6167 if (plane->type == DRM_PLANE_TYPE_CURSOR) 6168 handle_cursor_update(plane, old_plane_state); 6169 } 6170 6171 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 6172 struct dc_state *dc_state, 6173 struct drm_device *dev, 6174 struct amdgpu_display_manager *dm, 6175 struct drm_crtc *pcrtc, 6176 bool wait_for_vblank) 6177 { 6178 uint32_t i; 6179 uint64_t timestamp_ns; 6180 struct drm_plane *plane; 6181 struct drm_plane_state *old_plane_state, *new_plane_state; 6182 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 6183 struct drm_crtc_state *new_pcrtc_state = 6184 drm_atomic_get_new_crtc_state(state, pcrtc); 6185 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 6186 struct dm_crtc_state *dm_old_crtc_state = 6187 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 6188 int planes_count = 0, vpos, hpos; 6189 long r; 6190 unsigned long flags; 6191 struct amdgpu_bo *abo; 6192 uint64_t tiling_flags; 6193 uint32_t target_vblank, last_flip_vblank; 6194 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); 6195 bool pflip_present = false; 6196 bool swizzle = true; 6197 struct { 6198 struct dc_surface_update surface_updates[MAX_SURFACES]; 6199 struct dc_plane_info plane_infos[MAX_SURFACES]; 6200 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 6201 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 6202 struct dc_stream_update stream_update; 6203 } *bundle; 6204 6205 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 6206 6207 if (!bundle) { 6208 dm_error("Failed to allocate update bundle\n"); 6209 goto cleanup; 6210 } 6211 6212 /* 6213 * Disable the cursor first if we're disabling all the planes. 6214 * It'll remain on the screen after the planes are re-enabled 6215 * if we don't. 
6216 */ 6217 if (acrtc_state->active_planes == 0) 6218 amdgpu_dm_commit_cursors(state); 6219 6220 /* update planes when needed */ 6221 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 6222 struct drm_crtc *crtc = new_plane_state->crtc; 6223 struct drm_crtc_state *new_crtc_state; 6224 struct drm_framebuffer *fb = new_plane_state->fb; 6225 bool plane_needs_flip; 6226 struct dc_plane_state *dc_plane; 6227 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 6228 6229 /* Cursor plane is handled after stream updates */ 6230 if (plane->type == DRM_PLANE_TYPE_CURSOR) 6231 continue; 6232 6233 if (!fb || !crtc || pcrtc != crtc) 6234 continue; 6235 6236 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 6237 if (!new_crtc_state->active) 6238 continue; 6239 6240 dc_plane = dm_new_plane_state->dc_state; 6241 6242 if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) 6243 swizzle = false; 6244 6245 bundle->surface_updates[planes_count].surface = dc_plane; 6246 if (new_pcrtc_state->color_mgmt_changed) { 6247 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 6248 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 6249 } 6250 6251 fill_dc_scaling_info(new_plane_state, 6252 &bundle->scaling_infos[planes_count]); 6253 6254 bundle->surface_updates[planes_count].scaling_info = 6255 &bundle->scaling_infos[planes_count]; 6256 6257 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 6258 6259 pflip_present = pflip_present || plane_needs_flip; 6260 6261 if (!plane_needs_flip) { 6262 planes_count += 1; 6263 continue; 6264 } 6265 6266 abo = gem_to_amdgpu_bo(fb->obj[0]); 6267 6268 /* 6269 * Wait for all fences on this FB. Do a limited wait to avoid 6270 * deadlock during GPU reset, when this fence will not signal 6271 * but we hold the reservation lock for the BO. 6272 */ 6273 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true, 6274 false, 6275 msecs_to_jiffies(5000)); 6276 if (unlikely(r <= 0)) 6277 DRM_ERROR("Waiting for fences timed out!"); 6278 6279 /* 6280 * TODO: This might fail, so it is better not to rely on it; wait 6281 * explicitly on the fences instead. 6282 * In general this should be done from a 6283 * blocking commit, as per the framework helpers. 6284 */ 6285 r = amdgpu_bo_reserve(abo, true); 6286 if (unlikely(r != 0)) 6287 DRM_ERROR("failed to reserve buffer before flip\n"); 6288 6289 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 6290 6291 amdgpu_bo_unreserve(abo); 6292 6293 fill_dc_plane_info_and_addr( 6294 dm->adev, new_plane_state, tiling_flags, 6295 &bundle->plane_infos[planes_count], 6296 &bundle->flip_addrs[planes_count].address); 6297 6298 bundle->surface_updates[planes_count].plane_info = 6299 &bundle->plane_infos[planes_count]; 6300 6301 /* 6302 * Only allow immediate flips for fast updates that don't 6303 * change FB pitch, DCC state, rotation or mirroring. 
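* An immediate flip is requested via crtc->state->async_flip and is honoured only when DC classified this commit as UPDATE_TYPE_FAST, which is what the condition below encodes.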
6304 */ 6305 bundle->flip_addrs[planes_count].flip_immediate = 6306 crtc->state->async_flip && 6307 acrtc_state->update_type == UPDATE_TYPE_FAST; 6308 6309 timestamp_ns = ktime_get_ns(); 6310 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 6311 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 6312 bundle->surface_updates[planes_count].surface = dc_plane; 6313 6314 if (!bundle->surface_updates[planes_count].surface) { 6315 DRM_ERROR("No surface for CRTC: id=%d\n", 6316 acrtc_attach->crtc_id); 6317 continue; 6318 } 6319 6320 if (plane == pcrtc->primary) 6321 update_freesync_state_on_stream( 6322 dm, 6323 acrtc_state, 6324 acrtc_state->stream, 6325 dc_plane, 6326 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 6327 6328 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n", 6329 __func__, 6330 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 6331 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 6332 6333 planes_count += 1; 6334 6335 } 6336 6337 if (pflip_present) { 6338 if (!vrr_active) { 6339 /* Use old throttling in non-vrr fixed refresh rate mode 6340 * to keep flip scheduling based on target vblank counts 6341 * working in a backwards compatible way, e.g., for 6342 * clients using the GLX_OML_sync_control extension or 6343 * DRI3/Present extension with defined target_msc. 6344 */ 6345 last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id); 6346 } 6347 else { 6348 /* For variable refresh rate mode only: 6349 * Get vblank of last completed flip to avoid > 1 vrr 6350 * flips per video frame by use of throttling, but allow 6351 * flip programming anywhere in the possibly large 6352 * variable vrr vblank interval for fine-grained flip 6353 * timing control and more opportunity to avoid stutter 6354 * on late submission of flips. 6355 */ 6356 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 6357 last_flip_vblank = acrtc_attach->last_flip_vblank; 6358 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 6359 } 6360 6361 target_vblank = last_flip_vblank + wait_for_vblank; 6362 6363 /* 6364 * Wait until we're out of the vertical blank period before the one 6365 * targeted by the flip 6366 */ 6367 while ((acrtc_attach->enabled && 6368 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 6369 0, &vpos, &hpos, NULL, 6370 NULL, &pcrtc->hwmode) 6371 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 6372 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 6373 (int)(target_vblank - 6374 amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) { 6375 usleep_range(1000, 1100); 6376 } 6377 6378 if (acrtc_attach->base.state->event) { 6379 drm_crtc_vblank_get(pcrtc); 6380 6381 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 6382 6383 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 6384 prepare_flip_isr(acrtc_attach); 6385 6386 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 6387 } 6388 6389 if (acrtc_state->stream) { 6390 if (acrtc_state->freesync_vrr_info_changed) 6391 bundle->stream_update.vrr_infopacket = 6392 &acrtc_state->stream->vrr_infopacket; 6393 } 6394 } 6395 6396 /* Update the planes if changed or disable if we don't have any. 
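* Note that planes_count == 0 together with active_planes == 0 still submits a (planeless) stream update below, presumably so DC can tear down the pipes that backed the removed planes.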
*/ 6397 if ((planes_count || acrtc_state->active_planes == 0) && 6398 acrtc_state->stream) { 6399 bundle->stream_update.stream = acrtc_state->stream; 6400 if (new_pcrtc_state->mode_changed) { 6401 bundle->stream_update.src = acrtc_state->stream->src; 6402 bundle->stream_update.dst = acrtc_state->stream->dst; 6403 } 6404 6405 if (new_pcrtc_state->color_mgmt_changed) { 6406 /* 6407 * TODO: This isn't fully correct since we've actually 6408 * already modified the stream in place. 6409 */ 6410 bundle->stream_update.gamut_remap = 6411 &acrtc_state->stream->gamut_remap_matrix; 6412 bundle->stream_update.output_csc_transform = 6413 &acrtc_state->stream->csc_color_matrix; 6414 bundle->stream_update.out_transfer_func = 6415 acrtc_state->stream->out_transfer_func; 6416 } 6417 6418 acrtc_state->stream->abm_level = acrtc_state->abm_level; 6419 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 6420 bundle->stream_update.abm_level = &acrtc_state->abm_level; 6421 6422 /* 6423 * If FreeSync state on the stream has changed then we need to 6424 * re-adjust the min/max bounds now that DC doesn't handle this 6425 * as part of commit. 6426 */ 6427 if (amdgpu_dm_vrr_active(dm_old_crtc_state) != 6428 amdgpu_dm_vrr_active(acrtc_state)) { 6429 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 6430 dc_stream_adjust_vmin_vmax( 6431 dm->dc, acrtc_state->stream, 6432 &acrtc_state->vrr_params.adjust); 6433 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 6434 } 6435 mutex_lock(&dm->dc_lock); 6436 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 6437 acrtc_state->stream->link->psr_allow_active) 6438 amdgpu_dm_psr_disable(acrtc_state->stream); 6439 6440 dc_commit_updates_for_stream(dm->dc, 6441 bundle->surface_updates, 6442 planes_count, 6443 acrtc_state->stream, 6444 &bundle->stream_update, 6445 dc_state); 6446 6447 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 6448 acrtc_state->stream->psr_version && 6449 !acrtc_state->stream->link->psr_feature_enabled) 6450 amdgpu_dm_link_setup_psr(acrtc_state->stream); 6451 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && 6452 acrtc_state->stream->link->psr_feature_enabled && 6453 !acrtc_state->stream->link->psr_allow_active && 6454 swizzle) { 6455 amdgpu_dm_psr_enable(acrtc_state->stream); 6456 } 6457 6458 mutex_unlock(&dm->dc_lock); 6459 } 6460 6461 /* 6462 * Update cursor state *after* programming all the planes. 6463 * This avoids redundant programming in the case where we're going 6464 * to be disabling a single plane - those pipes are being disabled. 6465 */ 6466 if (acrtc_state->active_planes) 6467 amdgpu_dm_commit_cursors(state); 6468 6469 cleanup: 6470 kfree(bundle); 6471 } 6472 6473 static void amdgpu_dm_commit_audio(struct drm_device *dev, 6474 struct drm_atomic_state *state) 6475 { 6476 struct amdgpu_device *adev = dev->dev_private; 6477 struct amdgpu_dm_connector *aconnector; 6478 struct drm_connector *connector; 6479 struct drm_connector_state *old_con_state, *new_con_state; 6480 struct drm_crtc_state *new_crtc_state; 6481 struct dm_crtc_state *new_dm_crtc_state; 6482 const struct dc_stream_status *status; 6483 int i, inst; 6484 6485 /* Notify device removals. */ 6486 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 6487 if (old_con_state->crtc != new_con_state->crtc) { 6488 /* CRTC changes require notification. 
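* The connector moved to a different CRTC (or lost its CRTC), so drop the old audio instance here; the additions loop below re-registers it if a stream is still attached.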
*/ 6489 goto notify; 6490 } 6491 6492 if (!new_con_state->crtc) 6493 continue; 6494 6495 new_crtc_state = drm_atomic_get_new_crtc_state( 6496 state, new_con_state->crtc); 6497 6498 if (!new_crtc_state) 6499 continue; 6500 6501 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 6502 continue; 6503 6504 notify: 6505 aconnector = to_amdgpu_dm_connector(connector); 6506 6507 mutex_lock(&adev->dm.audio_lock); 6508 inst = aconnector->audio_inst; 6509 aconnector->audio_inst = -1; 6510 mutex_unlock(&adev->dm.audio_lock); 6511 6512 amdgpu_dm_audio_eld_notify(adev, inst); 6513 } 6514 6515 /* Notify audio device additions. */ 6516 for_each_new_connector_in_state(state, connector, new_con_state, i) { 6517 if (!new_con_state->crtc) 6518 continue; 6519 6520 new_crtc_state = drm_atomic_get_new_crtc_state( 6521 state, new_con_state->crtc); 6522 6523 if (!new_crtc_state) 6524 continue; 6525 6526 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 6527 continue; 6528 6529 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 6530 if (!new_dm_crtc_state->stream) 6531 continue; 6532 6533 status = dc_stream_get_status(new_dm_crtc_state->stream); 6534 if (!status) 6535 continue; 6536 6537 aconnector = to_amdgpu_dm_connector(connector); 6538 6539 mutex_lock(&adev->dm.audio_lock); 6540 inst = status->audio_inst; 6541 aconnector->audio_inst = inst; 6542 mutex_unlock(&adev->dm.audio_lock); 6543 6544 amdgpu_dm_audio_eld_notify(adev, inst); 6545 } 6546 } 6547 6548 /* 6549 * Enable interrupts on CRTCs that are newly active, have undergone 6550 * a modeset, or have active planes again. 6551 * 6552 * Done in two passes, based on the for_modeset flag: 6553 * Pass 1: For CRTCs going through modeset 6554 * Pass 2: For CRTCs going from 0 to n active planes 6555 * 6556 * Interrupts can only be enabled after the planes are programmed, 6557 * so this requires a two-pass approach since we don't want to 6558 * just defer the interrupts until after committing planes every time. 6559 */ 6560 static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, 6561 struct drm_atomic_state *state, 6562 bool for_modeset) 6563 { 6564 struct amdgpu_device *adev = dev->dev_private; 6565 struct drm_crtc *crtc; 6566 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 6567 int i; 6568 #ifdef CONFIG_DEBUG_FS 6569 enum amdgpu_dm_pipe_crc_source source; 6570 #endif 6571 6572 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 6573 new_crtc_state, i) { 6574 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6575 struct dm_crtc_state *dm_new_crtc_state = 6576 to_dm_crtc_state(new_crtc_state); 6577 struct dm_crtc_state *dm_old_crtc_state = 6578 to_dm_crtc_state(old_crtc_state); 6579 bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state); 6580 bool run_pass; 6581 6582 run_pass = (for_modeset && modeset) || 6583 (!for_modeset && !modeset && 6584 !dm_old_crtc_state->interrupts_enabled); 6585 6586 if (!run_pass) 6587 continue; 6588 6589 if (!dm_new_crtc_state->interrupts_enabled) 6590 continue; 6591 6592 manage_dm_interrupts(adev, acrtc, true); 6593 6594 #ifdef CONFIG_DEBUG_FS 6595 /* The stream has changed so CRC capture needs to be re-enabled. 
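* amdgpu_dm_crtc_configure_crc_source() below re-arms capture using the CRC source cached in the new CRTC state.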
*/ 6596 source = dm_new_crtc_state->crc_src; 6597 if (amdgpu_dm_is_valid_crc_source(source)) { 6598 amdgpu_dm_crtc_configure_crc_source( 6599 crtc, dm_new_crtc_state, 6600 dm_new_crtc_state->crc_src); 6601 } 6602 #endif 6603 } 6604 } 6605 6606 /* 6607 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 6608 * @crtc_state: the DRM CRTC state 6609 * @stream_state: the DC stream state. 6610 * 6611 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 6612 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 6613 */ 6614 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 6615 struct dc_stream_state *stream_state) 6616 { 6617 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 6618 } 6619 6620 static int amdgpu_dm_atomic_commit(struct drm_device *dev, 6621 struct drm_atomic_state *state, 6622 bool nonblock) 6623 { 6624 struct drm_crtc *crtc; 6625 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 6626 struct amdgpu_device *adev = dev->dev_private; 6627 int i; 6628 6629 /* 6630 * We evade vblank and pflip interrupts on CRTCs that are undergoing 6631 * a modeset, being disabled, or have no active planes. 6632 * 6633 * It's done in atomic commit rather than commit tail for now since 6634 * some of these interrupt handlers access the current CRTC state and 6635 * potentially the stream pointer itself. 6636 * 6637 * Since the atomic state is swapped within atomic commit and not within 6638 * commit tail, this would lead to the new state (that hasn't been committed yet) 6639 * being accessed from within the handlers. 6640 * 6641 * TODO: Fix this so we can do this in commit tail and not have to block 6642 * in atomic check. 6643 */ 6644 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 6645 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 6646 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6647 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6648 6649 if (dm_old_crtc_state->interrupts_enabled && 6650 (!dm_new_crtc_state->interrupts_enabled || 6651 drm_atomic_crtc_needs_modeset(new_crtc_state))) 6652 manage_dm_interrupts(adev, acrtc, false); 6653 } 6654 /* 6655 * Add a check here for SoCs that support a hardware cursor plane, to 6656 * unset legacy_cursor_update 6657 */ 6658 6659 return drm_atomic_helper_commit(dev, state, nonblock); 6660 6661 /* TODO: Handle EINTR, re-enable IRQ */ 6662 } 6663 6664 /** 6665 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 6666 * @state: The atomic state to commit 6667 * 6668 * This will tell DC to commit the constructed DC state from atomic_check, 6669 * programming the hardware. Any failure here implies a hardware failure, since 6670 * atomic check should have filtered anything non-kosher. 
6671 */ 6672 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 6673 { 6674 struct drm_device *dev = state->dev; 6675 struct amdgpu_device *adev = dev->dev_private; 6676 struct amdgpu_display_manager *dm = &adev->dm; 6677 struct dm_atomic_state *dm_state; 6678 struct dc_state *dc_state = NULL, *dc_state_temp = NULL; 6679 uint32_t i, j; 6680 struct drm_crtc *crtc; 6681 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 6682 unsigned long flags; 6683 bool wait_for_vblank = true; 6684 struct drm_connector *connector; 6685 struct drm_connector_state *old_con_state, *new_con_state; 6686 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 6687 int crtc_disable_count = 0; 6688 6689 drm_atomic_helper_update_legacy_modeset_state(dev, state); 6690 6691 dm_state = dm_atomic_get_new_state(state); 6692 if (dm_state && dm_state->context) { 6693 dc_state = dm_state->context; 6694 } else { 6695 /* No state changes, retain current state. */ 6696 dc_state_temp = dc_create_state(dm->dc); 6697 ASSERT(dc_state_temp); 6698 dc_state = dc_state_temp; 6699 dc_resource_state_copy_construct_current(dm->dc, dc_state); 6700 } 6701 6702 /* update changed items */ 6703 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 6704 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6705 6706 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6707 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 6708 6709 DRM_DEBUG_DRIVER( 6710 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 6711 "planes_changed:%d, mode_changed:%d,active_changed:%d," 6712 "connectors_changed:%d\n", 6713 acrtc->crtc_id, 6714 new_crtc_state->enable, 6715 new_crtc_state->active, 6716 new_crtc_state->planes_changed, 6717 new_crtc_state->mode_changed, 6718 new_crtc_state->active_changed, 6719 new_crtc_state->connectors_changed); 6720 6721 /* Copy all transient state flags into dc state */ 6722 if (dm_new_crtc_state->stream) { 6723 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 6724 dm_new_crtc_state->stream); 6725 } 6726 6727 /* handles the headless hotplug case, updating new_state and 6728 * aconnector as needed 6729 */ 6730 6731 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 6732 6733 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 6734 6735 if (!dm_new_crtc_state->stream) { 6736 /* 6737 * This can happen because of issues with the 6738 * delivery of userspace notifications: 6739 * userspace tries to set a mode on a 6740 * display which is in fact disconnected. 6741 * dc_sink is NULL on the aconnector in this case. 6742 * We expect a mode-reset to come soon. 6743 * 6744 * This can also happen when an unplug is done 6745 * during the resume sequence. 6746 * 6747 * In this case, we want to pretend we still 6748 * have a sink to keep the pipe running so that 6749 * the HW state is consistent with the SW state. 6750 */ 6751 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 6752 __func__, acrtc->base.base.id); 6753 continue; 6754 } 6755 6756 if (dm_old_crtc_state->stream) 6757 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 6758 6759 pm_runtime_get_noresume(dev->dev); 6760 6761 acrtc->enabled = true; 6762 acrtc->hw_mode = new_crtc_state->mode; 6763 crtc->hwmode = new_crtc_state->mode; 6764 } else if (modereset_required(new_crtc_state)) { 6765 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); 6766 /* i.e. 
reset mode */ 6767 if (dm_old_crtc_state->stream) { 6768 if (dm_old_crtc_state->stream->link->psr_allow_active) 6769 amdgpu_dm_psr_disable(dm_old_crtc_state->stream); 6770 6771 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 6772 } 6773 } 6774 } /* for_each_crtc_in_state() */ 6775 6776 if (dc_state) { 6777 dm_enable_per_frame_crtc_master_sync(dc_state); 6778 mutex_lock(&dm->dc_lock); 6779 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 6780 mutex_unlock(&dm->dc_lock); 6781 } 6782 6783 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 6784 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6785 6786 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6787 6788 if (dm_new_crtc_state->stream != NULL) { 6789 const struct dc_stream_status *status = 6790 dc_stream_get_status(dm_new_crtc_state->stream); 6791 6792 if (!status) 6793 status = dc_stream_get_status_from_state(dc_state, 6794 dm_new_crtc_state->stream); 6795 6796 if (!status) 6797 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 6798 else 6799 acrtc->otg_inst = status->primary_otg_inst; 6800 } 6801 } 6802 #ifdef CONFIG_DRM_AMD_DC_HDCP 6803 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 6804 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 6805 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 6806 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6807 6808 new_crtc_state = NULL; 6809 6810 if (acrtc) 6811 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 6812 6813 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6814 6815 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 6816 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 6817 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 6818 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 6819 continue; 6820 } 6821 6822 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) 6823 update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); 6824 } 6825 #endif 6826 6827 /* Handle connector state changes */ 6828 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 6829 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 6830 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 6831 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 6832 struct dc_surface_update dummy_updates[MAX_SURFACES]; 6833 struct dc_stream_update stream_update; 6834 struct dc_info_packet hdr_packet; 6835 struct dc_stream_status *status = NULL; 6836 bool abm_changed, hdr_changed, scaling_changed; 6837 6838 memset(&dummy_updates, 0, sizeof(dummy_updates)); 6839 memset(&stream_update, 0, sizeof(stream_update)); 6840 6841 if (acrtc) { 6842 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 6843 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 6844 } 6845 6846 /* Skip any modesets/resets */ 6847 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 6848 continue; 6849 6850 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6851 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 6852 6853 scaling_changed = is_scaling_state_different(dm_new_con_state, 6854 dm_old_con_state); 6855 6856 abm_changed = 
dm_new_crtc_state->abm_level != 6857 dm_old_crtc_state->abm_level; 6858 6859 hdr_changed = 6860 is_hdr_metadata_different(old_con_state, new_con_state); 6861 6862 if (!scaling_changed && !abm_changed && !hdr_changed) 6863 continue; 6864 6865 stream_update.stream = dm_new_crtc_state->stream; 6866 if (scaling_changed) { 6867 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 6868 dm_new_con_state, dm_new_crtc_state->stream); 6869 6870 stream_update.src = dm_new_crtc_state->stream->src; 6871 stream_update.dst = dm_new_crtc_state->stream->dst; 6872 } 6873 6874 if (abm_changed) { 6875 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 6876 6877 stream_update.abm_level = &dm_new_crtc_state->abm_level; 6878 } 6879 6880 if (hdr_changed) { 6881 fill_hdr_info_packet(new_con_state, &hdr_packet); 6882 stream_update.hdr_static_metadata = &hdr_packet; 6883 } 6884 6885 status = dc_stream_get_status(dm_new_crtc_state->stream); 6886 WARN_ON(!status); 6887 WARN_ON(!status->plane_count); 6888 6889 /* 6890 * TODO: DC refuses to perform stream updates without a dc_surface_update. 6891 * Here we create an empty update on each plane. 6892 * To fix this, DC should permit updating only stream properties. 6893 */ 6894 for (j = 0; j < status->plane_count; j++) 6895 dummy_updates[j].surface = status->plane_states[0]; 6896 6897 6898 mutex_lock(&dm->dc_lock); 6899 dc_commit_updates_for_stream(dm->dc, 6900 dummy_updates, 6901 status->plane_count, 6902 dm_new_crtc_state->stream, 6903 &stream_update, 6904 dc_state); 6905 mutex_unlock(&dm->dc_lock); 6906 } 6907 6908 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 6909 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 6910 new_crtc_state, i) { 6911 if (old_crtc_state->active && !new_crtc_state->active) 6912 crtc_disable_count++; 6913 6914 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6915 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 6916 6917 /* Update freesync active state. */ 6918 pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); 6919 6920 /* Handle vrr on->off / off->on transitions */ 6921 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, 6922 dm_new_crtc_state); 6923 } 6924 6925 /* Enable interrupts for CRTCs going through a modeset. */ 6926 amdgpu_dm_enable_crtc_interrupts(dev, state, true); 6927 6928 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 6929 if (new_crtc_state->async_flip) 6930 wait_for_vblank = false; 6931 6932 /* update planes when needed per crtc*/ 6933 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 6934 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 6935 6936 if (dm_new_crtc_state->stream) 6937 amdgpu_dm_commit_planes(state, dc_state, dev, 6938 dm, crtc, wait_for_vblank); 6939 } 6940 6941 /* Enable interrupts for CRTCs going from 0 to n active planes. */ 6942 amdgpu_dm_enable_crtc_interrupts(dev, state, false); 6943 6944 /* Update audio instances for each connector. 
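* amdgpu_dm_commit_audio() notifies removals first and then re-adds any connector that still drives a stream, as the two loops above show.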
*/ 6945 amdgpu_dm_commit_audio(dev, state); 6946 6947 /* 6948 * send vblank event on all events not handled in flip and 6949 * mark consumed event for drm_atomic_helper_commit_hw_done 6950 */ 6951 spin_lock_irqsave(&adev->ddev->event_lock, flags); 6952 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 6953 6954 if (new_crtc_state->event) 6955 drm_send_event_locked(dev, &new_crtc_state->event->base); 6956 6957 new_crtc_state->event = NULL; 6958 } 6959 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 6960 6961 /* Signal HW programming completion */ 6962 drm_atomic_helper_commit_hw_done(state); 6963 6964 if (wait_for_vblank) 6965 drm_atomic_helper_wait_for_flip_done(dev, state); 6966 6967 drm_atomic_helper_cleanup_planes(dev, state); 6968 6969 /* 6970 * Finally, drop a runtime PM reference for each newly disabled CRTC, 6971 * so we can put the GPU into runtime suspend if we're not driving any 6972 * displays anymore 6973 */ 6974 for (i = 0; i < crtc_disable_count; i++) 6975 pm_runtime_put_autosuspend(dev->dev); 6976 pm_runtime_mark_last_busy(dev->dev); 6977 6978 if (dc_state_temp) 6979 dc_release_state(dc_state_temp); 6980 } 6981 6982 6983 static int dm_force_atomic_commit(struct drm_connector *connector) 6984 { 6985 int ret = 0; 6986 struct drm_device *ddev = connector->dev; 6987 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 6988 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 6989 struct drm_plane *plane = disconnected_acrtc->base.primary; 6990 struct drm_connector_state *conn_state; 6991 struct drm_crtc_state *crtc_state; 6992 struct drm_plane_state *plane_state; 6993 6994 if (!state) 6995 return -ENOMEM; 6996 6997 state->acquire_ctx = ddev->mode_config.acquire_ctx; 6998 6999 /* Construct an atomic state to restore previous display setting */ 7000 7001 /* 7002 * Attach connectors to drm_atomic_state 7003 */ 7004 conn_state = drm_atomic_get_connector_state(state, connector); 7005 7006 ret = PTR_ERR_OR_ZERO(conn_state); 7007 if (ret) 7008 goto err; 7009 7010 /* Attach crtc to drm_atomic_state*/ 7011 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 7012 7013 ret = PTR_ERR_OR_ZERO(crtc_state); 7014 if (ret) 7015 goto err; 7016 7017 /* force a restore */ 7018 crtc_state->mode_changed = true; 7019 7020 /* Attach plane to drm_atomic_state */ 7021 plane_state = drm_atomic_get_plane_state(state, plane); 7022 7023 ret = PTR_ERR_OR_ZERO(plane_state); 7024 if (ret) 7025 goto err; 7026 7027 7028 /* Call commit internally with the state we just constructed */ 7029 ret = drm_atomic_commit(state); 7030 if (!ret) 7031 return 0; 7032 7033 err: 7034 DRM_ERROR("Restoring old state failed with %i\n", ret); 7035 drm_atomic_state_put(state); 7036 7037 return ret; 7038 } 7039 7040 /* 7041 * This function handles all cases when set mode does not come upon hotplug. 
7042 * This includes when a display is unplugged then plugged back into the 7043 * same port and when running without usermode desktop manager support. 7044 */ 7045 void dm_restore_drm_connector_state(struct drm_device *dev, 7046 struct drm_connector *connector) 7047 { 7048 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7049 struct amdgpu_crtc *disconnected_acrtc; 7050 struct dm_crtc_state *acrtc_state; 7051 7052 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 7053 return; 7054 7055 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 7056 if (!disconnected_acrtc) 7057 return; 7058 7059 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 7060 if (!acrtc_state->stream) 7061 return; 7062 7063 /* 7064 * If the previous sink has not been released and is different from the 7065 * current one, we deduce we are in a state where we cannot rely on a 7066 * usermode call to turn on the display, so we do it here. 7067 */ 7068 if (acrtc_state->stream->sink != aconnector->dc_sink) 7069 dm_force_atomic_commit(&aconnector->base); 7070 } 7071 7072 /* 7073 * Grabs all modesetting locks to serialize against any blocking commits, 7074 * and waits for completion of all non-blocking commits. 7075 */ 7076 static int do_aquire_global_lock(struct drm_device *dev, 7077 struct drm_atomic_state *state) 7078 { 7079 struct drm_crtc *crtc; 7080 struct drm_crtc_commit *commit; 7081 long ret; 7082 7083 /* 7084 * Adding all modeset locks to acquire_ctx ensures that 7085 * when the framework releases it, the 7086 * extra locks we take here will get released too. 7087 */ 7088 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 7089 if (ret) 7090 return ret; 7091 7092 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 7093 spin_lock(&crtc->commit_lock); 7094 commit = list_first_entry_or_null(&crtc->commit_list, 7095 struct drm_crtc_commit, commit_entry); 7096 if (commit) 7097 drm_crtc_commit_get(commit); 7098 spin_unlock(&crtc->commit_lock); 7099 7100 if (!commit) 7101 continue; 7102 7103 /* 7104 * Make sure all pending HW programming has completed and 7105 * all page flips are done. 7106 */ 7107 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 7108 7109 if (ret > 0) 7110 ret = wait_for_completion_interruptible_timeout( 7111 &commit->flip_done, 10*HZ); 7112 7113 if (ret == 0) 7114 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " 7115 "timed out\n", crtc->base.id, crtc->name); 7116 7117 drm_crtc_commit_put(commit); 7118 } 7119 7120 return ret < 0 ? ret : 0; 7121 } 7122 7123 static void get_freesync_config_for_crtc( 7124 struct dm_crtc_state *new_crtc_state, 7125 struct dm_connector_state *new_con_state) 7126 { 7127 struct mod_freesync_config config = {0}; 7128 struct amdgpu_dm_connector *aconnector = 7129 to_amdgpu_dm_connector(new_con_state->base.connector); 7130 struct drm_display_mode *mode = &new_crtc_state->base.mode; 7131 int vrefresh = drm_mode_vrefresh(mode); 7132 7133 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 7134 vrefresh >= aconnector->min_vfreq && 7135 vrefresh <= aconnector->max_vfreq; 7136 7137 if (new_crtc_state->vrr_supported) { 7138 new_crtc_state->stream->ignore_msa_timing_param = true; 7139 config.state = new_crtc_state->base.vrr_enabled ? 
7140 VRR_STATE_ACTIVE_VARIABLE : 7141 VRR_STATE_INACTIVE; 7142 config.min_refresh_in_uhz = 7143 aconnector->min_vfreq * 1000000; 7144 config.max_refresh_in_uhz = 7145 aconnector->max_vfreq * 1000000; 7146 config.vsif_supported = true; 7147 config.btr = true; 7148 } 7149 7150 new_crtc_state->freesync_config = config; 7151 } 7152 7153 static void reset_freesync_config_for_crtc( 7154 struct dm_crtc_state *new_crtc_state) 7155 { 7156 new_crtc_state->vrr_supported = false; 7157 7158 memset(&new_crtc_state->vrr_params, 0, 7159 sizeof(new_crtc_state->vrr_params)); 7160 memset(&new_crtc_state->vrr_infopacket, 0, 7161 sizeof(new_crtc_state->vrr_infopacket)); 7162 } 7163 7164 static int dm_update_crtc_state(struct amdgpu_display_manager *dm, 7165 struct drm_atomic_state *state, 7166 struct drm_crtc *crtc, 7167 struct drm_crtc_state *old_crtc_state, 7168 struct drm_crtc_state *new_crtc_state, 7169 bool enable, 7170 bool *lock_and_validation_needed) 7171 { 7172 struct dm_atomic_state *dm_state = NULL; 7173 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 7174 struct dc_stream_state *new_stream; 7175 int ret = 0; 7176 7177 /* 7178 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 7179 * update changed items 7180 */ 7181 struct amdgpu_crtc *acrtc = NULL; 7182 struct amdgpu_dm_connector *aconnector = NULL; 7183 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 7184 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 7185 7186 new_stream = NULL; 7187 7188 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 7189 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 7190 acrtc = to_amdgpu_crtc(crtc); 7191 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 7192 7193 /* TODO This hack should go away */ 7194 if (aconnector && enable) { 7195 /* Make sure fake sink is created in plug-in scenario */ 7196 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 7197 &aconnector->base); 7198 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 7199 &aconnector->base); 7200 7201 if (IS_ERR(drm_new_conn_state)) { 7202 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 7203 goto fail; 7204 } 7205 7206 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 7207 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 7208 7209 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 7210 goto skip_modeset; 7211 7212 new_stream = create_stream_for_sink(aconnector, 7213 &new_crtc_state->mode, 7214 dm_new_conn_state, 7215 dm_old_crtc_state->stream); 7216 7217 /* 7218 * we can have no stream on ACTION_SET if a display 7219 * was disconnected during S3, in this case it is not an 7220 * error, the OS will be updated after detection, and 7221 * will do the right thing on next atomic commit 7222 */ 7223 7224 if (!new_stream) { 7225 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 7226 __func__, acrtc->base.base.id); 7227 ret = -ENOMEM; 7228 goto fail; 7229 } 7230 7231 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 7232 7233 ret = fill_hdr_info_packet(drm_new_conn_state, 7234 &new_stream->hdr_static_metadata); 7235 if (ret) 7236 goto fail; 7237 7238 /* 7239 * If we already removed the old stream from the context 7240 * (and set the new stream to NULL) then we can't reuse 7241 * the old stream even if the stream and scaling are unchanged. 7242 * We'll hit the BUG_ON and black screen. 
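* (The BUG_ON in question is the dm_new_crtc_state->stream == NULL check after the skip_modeset label further below.)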
7243 * 7244 * TODO: Refactor this function to allow this check to work 7245 * in all conditions. 7246 */ 7247 if (dm_new_crtc_state->stream && 7248 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 7249 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 7250 new_crtc_state->mode_changed = false; 7251 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 7252 new_crtc_state->mode_changed); 7253 } 7254 } 7255 7256 /* mode_changed flag may get updated above, need to check again */ 7257 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 7258 goto skip_modeset; 7259 7260 DRM_DEBUG_DRIVER( 7261 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 7262 "planes_changed:%d, mode_changed:%d,active_changed:%d," 7263 "connectors_changed:%d\n", 7264 acrtc->crtc_id, 7265 new_crtc_state->enable, 7266 new_crtc_state->active, 7267 new_crtc_state->planes_changed, 7268 new_crtc_state->mode_changed, 7269 new_crtc_state->active_changed, 7270 new_crtc_state->connectors_changed); 7271 7272 /* Remove stream for any changed/disabled CRTC */ 7273 if (!enable) { 7274 7275 if (!dm_old_crtc_state->stream) 7276 goto skip_modeset; 7277 7278 ret = dm_atomic_get_state(state, &dm_state); 7279 if (ret) 7280 goto fail; 7281 7282 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 7283 crtc->base.id); 7284 7285 /* i.e. reset mode */ 7286 if (dc_remove_stream_from_ctx( 7287 dm->dc, 7288 dm_state->context, 7289 dm_old_crtc_state->stream) != DC_OK) { 7290 ret = -EINVAL; 7291 goto fail; 7292 } 7293 7294 dc_stream_release(dm_old_crtc_state->stream); 7295 dm_new_crtc_state->stream = NULL; 7296 7297 reset_freesync_config_for_crtc(dm_new_crtc_state); 7298 7299 *lock_and_validation_needed = true; 7300 7301 } else {/* Add stream for any updated/enabled CRTC */ 7302 /* 7303 * Quick fix to prevent a NULL pointer dereference on new_stream when newly 7304 * added MST connectors are not found in the existing crtc_state in chained mode. 7305 * TODO: need to dig out the root cause of this. 7306 */ 7307 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) 7308 goto skip_modeset; 7309 7310 if (modereset_required(new_crtc_state)) 7311 goto skip_modeset; 7312 7313 if (modeset_required(new_crtc_state, new_stream, 7314 dm_old_crtc_state->stream)) { 7315 7316 WARN_ON(dm_new_crtc_state->stream); 7317 7318 ret = dm_atomic_get_state(state, &dm_state); 7319 if (ret) 7320 goto fail; 7321 7322 dm_new_crtc_state->stream = new_stream; 7323 7324 dc_stream_retain(new_stream); 7325 7326 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n", 7327 crtc->base.id); 7328 7329 if (dc_add_stream_to_ctx( 7330 dm->dc, 7331 dm_state->context, 7332 dm_new_crtc_state->stream) != DC_OK) { 7333 ret = -EINVAL; 7334 goto fail; 7335 } 7336 7337 *lock_and_validation_needed = true; 7338 } 7339 } 7340 7341 skip_modeset: 7342 /* Release extra reference */ 7343 if (new_stream) 7344 dc_stream_release(new_stream); 7345 7346 /* 7347 * We want to do dc stream updates that do not require a 7348 * full modeset below. 7349 */ 7350 if (!(enable && aconnector && new_crtc_state->enable && 7351 new_crtc_state->active)) 7352 return 0; 7353 /* 7354 * Given the above conditions, the dc state cannot be NULL because: 7355 * 1. We're in the process of enabling CRTCs (the stream has just been 7356 * added to the dc context, or is already in the context), 7357 * 2. it has a valid connector attached, and 7358 * 3. it is currently active and enabled. 7359 * => The dc stream state currently exists. 
static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/*
	 * TODO: Remove this hack once the checks below are sufficient to
	 * determine when we need to reset all the planes on the stream.
	 */
	if (state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old crtc == new_crtc == NULL, plane not in context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* TODO: Remove this once we can handle fast format changes. */
		if (old_other_state->fb && new_other_state->fb &&
		    old_other_state->fb->format != new_other_state->fb->format)
			return true;
	}

	return false;
}
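/*
 * Illustrative example (not driver code) of the rules above: if userspace
 * enables an overlay plane on a CRTC that already has an active primary
 * plane, old_other_state->crtc != new_other_state->crtc holds for the
 * overlay, so should_reset_plane() reports true and every plane on that
 * stream is removed and re-added, letting DC re-acquire pipes in the new
 * z-order. Likewise, flipping a plane between framebuffers of different
 * formats (say XRGB8888 to NV12) triggers a reset via the fb->format check.
 */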
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	/* TODO: Implement atomic check for the cursor plane */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			ret = -EINVAL;
			return ret;
		}

		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			new_plane_crtc->dev->dev_private,
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}
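/*
 * Illustrative sketch (not driver code): the dc_plane_state ownership
 * handoff implemented in dm_update_plane_state() above. Until the plane is
 * attached to a stream in the context, error paths must release it;
 * afterwards it belongs to the atomic state:
 *
 *	dc_new_plane_state = dc_create_plane_state(dc);	// refcounted, ours
 *	if (fill_dc_plane_attributes(...))
 *		dc_plane_state_release(dc_new_plane_state);	// still ours, clean up
 *	dc_add_plane_to_context(...);			// now held by the context
 *	dm_new_plane_state->dc_state = dc_new_plane_state;	// freed when the state is cleaned
 */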
static int
dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
				    struct drm_atomic_state *state,
				    enum surface_update_type *out_type)
{
	struct dc *dc = dm->dc;
	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
	int i, j, num_plane, ret = 0;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_plane *plane;

	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
	struct dc_stream_status *status = NULL;

	struct dc_surface_update *updates;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);

	if (!updates) {
		DRM_ERROR("Failed to allocate plane updates\n");
		/* Set type to FULL to avoid crashing in DC */
		update_type = UPDATE_TYPE_FULL;
		goto cleanup;
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dc_scaling_info scaling_info;
		struct dc_stream_update stream_update;

		memset(&stream_update, 0, sizeof(stream_update));

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
		num_plane = 0;

		if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}

		if (!new_dm_crtc_state->stream)
			continue;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
			const struct amdgpu_framebuffer *amdgpu_fb =
				to_amdgpu_framebuffer(new_plane_state->fb);
			struct dc_plane_info plane_info;
			struct dc_flip_addrs flip_addr;
			uint64_t tiling_flags;

			new_plane_crtc = new_plane_state->crtc;
			old_plane_crtc = old_plane_state->crtc;
			new_dm_plane_state = to_dm_plane_state(new_plane_state);
			old_dm_plane_state = to_dm_plane_state(old_plane_state);

			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_dm_plane_state->dc_state != old_dm_plane_state->dc_state) {
				update_type = UPDATE_TYPE_FULL;
				goto cleanup;
			}

			if (crtc != new_plane_crtc)
				continue;

			updates[num_plane].surface = new_dm_plane_state->dc_state;

			if (new_crtc_state->mode_changed) {
				stream_update.dst = new_dm_crtc_state->stream->dst;
				stream_update.src = new_dm_crtc_state->stream->src;
			}

			if (new_crtc_state->color_mgmt_changed) {
				updates[num_plane].gamma =
					new_dm_plane_state->dc_state->gamma_correction;
				updates[num_plane].in_transfer_func =
					new_dm_plane_state->dc_state->in_transfer_func;
				stream_update.gamut_remap =
					&new_dm_crtc_state->stream->gamut_remap_matrix;
				stream_update.output_csc_transform =
					&new_dm_crtc_state->stream->csc_color_matrix;
				stream_update.out_transfer_func =
					new_dm_crtc_state->stream->out_transfer_func;
			}

			ret = fill_dc_scaling_info(new_plane_state,
						   &scaling_info);
			if (ret)
				goto cleanup;

			updates[num_plane].scaling_info = &scaling_info;

			if (amdgpu_fb) {
				ret = get_fb_info(amdgpu_fb, &tiling_flags);
				if (ret)
					goto cleanup;

				memset(&flip_addr, 0, sizeof(flip_addr));

				ret = fill_dc_plane_info_and_addr(
					dm->adev, new_plane_state, tiling_flags,
					&plane_info,
					&flip_addr.address);
				if (ret)
					goto cleanup;

				updates[num_plane].plane_info = &plane_info;
				updates[num_plane].flip_addr = &flip_addr;
			}

			num_plane++;
		}

		if (num_plane == 0)
			continue;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto cleanup;

		old_dm_state = dm_atomic_get_old_state(state);
		if (!old_dm_state) {
			ret = -EINVAL;
			goto cleanup;
		}

		status = dc_stream_get_status_from_state(old_dm_state->context,
							 new_dm_crtc_state->stream);
		stream_update.stream = new_dm_crtc_state->stream;
		/*
		 * TODO: DC modifies the surface during this call so we need
		 * to lock here - find a way to do this without locking.
		 */
		mutex_lock(&dm->dc_lock);
		update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
								  &stream_update, status);
		mutex_unlock(&dm->dc_lock);

		if (update_type > UPDATE_TYPE_MED) {
			update_type = UPDATE_TYPE_FULL;
			goto cleanup;
		}
	}

cleanup:
	kfree(updates);

	*out_type = update_type;
	return ret;
}
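/*
 * Illustrative sketch (not part of the driver): surface_update_type is an
 * ordered enum, UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL, so
 * combining the per-stream result above with the "overall" type in
 * amdgpu_dm_atomic_check() below reduces to taking a maximum. The helper
 * name dm_max_update_type is hypothetical.
 */
static inline enum surface_update_type __maybe_unused
dm_max_update_type(enum surface_update_type a, enum surface_update_type b)
{
	/* Mirrors "if (overall < type) overall = type" in amdgpu_dm_atomic_check() */
	return a > b ? a : b;
}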
/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
 * that any such full update commit will wait for completion of any outstanding
 * flip using DRM's synchronization events. See
 * dm_determine_update_type_for_commit().
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, or a negative error code if validation failed.
 */
7784 */ 7785 static int amdgpu_dm_atomic_check(struct drm_device *dev, 7786 struct drm_atomic_state *state) 7787 { 7788 struct amdgpu_device *adev = dev->dev_private; 7789 struct dm_atomic_state *dm_state = NULL; 7790 struct dc *dc = adev->dm.dc; 7791 struct drm_connector *connector; 7792 struct drm_connector_state *old_con_state, *new_con_state; 7793 struct drm_crtc *crtc; 7794 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 7795 struct drm_plane *plane; 7796 struct drm_plane_state *old_plane_state, *new_plane_state; 7797 enum surface_update_type update_type = UPDATE_TYPE_FAST; 7798 enum surface_update_type overall_update_type = UPDATE_TYPE_FAST; 7799 7800 int ret, i; 7801 7802 /* 7803 * This bool will be set for true for any modeset/reset 7804 * or plane update which implies non fast surface update. 7805 */ 7806 bool lock_and_validation_needed = false; 7807 7808 ret = drm_atomic_helper_check_modeset(dev, state); 7809 if (ret) 7810 goto fail; 7811 7812 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7813 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 7814 !new_crtc_state->color_mgmt_changed && 7815 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled) 7816 continue; 7817 7818 if (!new_crtc_state->enable) 7819 continue; 7820 7821 ret = drm_atomic_add_affected_connectors(state, crtc); 7822 if (ret) 7823 return ret; 7824 7825 ret = drm_atomic_add_affected_planes(state, crtc); 7826 if (ret) 7827 goto fail; 7828 } 7829 7830 /* 7831 * Add all primary and overlay planes on the CRTC to the state 7832 * whenever a plane is enabled to maintain correct z-ordering 7833 * and to enable fast surface updates. 7834 */ 7835 drm_for_each_crtc(crtc, dev) { 7836 bool modified = false; 7837 7838 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 7839 if (plane->type == DRM_PLANE_TYPE_CURSOR) 7840 continue; 7841 7842 if (new_plane_state->crtc == crtc || 7843 old_plane_state->crtc == crtc) { 7844 modified = true; 7845 break; 7846 } 7847 } 7848 7849 if (!modified) 7850 continue; 7851 7852 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 7853 if (plane->type == DRM_PLANE_TYPE_CURSOR) 7854 continue; 7855 7856 new_plane_state = 7857 drm_atomic_get_plane_state(state, plane); 7858 7859 if (IS_ERR(new_plane_state)) { 7860 ret = PTR_ERR(new_plane_state); 7861 goto fail; 7862 } 7863 } 7864 } 7865 7866 /* Remove exiting planes if they are modified */ 7867 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 7868 ret = dm_update_plane_state(dc, state, plane, 7869 old_plane_state, 7870 new_plane_state, 7871 false, 7872 &lock_and_validation_needed); 7873 if (ret) 7874 goto fail; 7875 } 7876 7877 /* Disable all crtcs which require disable */ 7878 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7879 ret = dm_update_crtc_state(&adev->dm, state, crtc, 7880 old_crtc_state, 7881 new_crtc_state, 7882 false, 7883 &lock_and_validation_needed); 7884 if (ret) 7885 goto fail; 7886 } 7887 7888 /* Enable all crtcs which require enable */ 7889 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7890 ret = dm_update_crtc_state(&adev->dm, state, crtc, 7891 old_crtc_state, 7892 new_crtc_state, 7893 true, 7894 &lock_and_validation_needed); 7895 if (ret) 7896 goto fail; 7897 } 7898 7899 /* Add new/modified planes */ 7900 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 7901 ret = 
	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed);
		if (ret)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Perform validation of the MST topology in the state */
	ret = drm_dp_mst_atomic_check(state);
	if (ret)
		goto fail;

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling changes validation was removed due to the inability to
	 * commit a new stream into the context without causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		overall_update_type = UPDATE_TYPE_FULL;
		lock_and_validation_needed = true;
	}

	ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
	if (ret)
		goto fail;

	if (overall_update_type < update_type)
		overall_update_type = update_type;

	/*
	 * lock_and_validation_needed was an old way to determine if we need to
	 * set the global lock. Leaving it in to check if we broke any corner
	 * cases:
	 *	lock_and_validation_needed true  = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
	 *	lock_and_validation_needed false = UPDATE_TYPE_FAST
	 */
	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
		WARN(1, "Global lock should be set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");

	if (overall_update_type > UPDATE_TYPE_FAST) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

		if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation we need to free it and
		 * retain the existing one instead.
		 */
		struct dm_atomic_state *new_dm_state, *old_dm_state;

		new_dm_state = dm_atomic_get_new_state(state);
		old_dm_state = dm_atomic_get_old_state(state);

		if (new_dm_state && old_dm_state) {
			if (new_dm_state->context)
				dc_release_state(new_dm_state->context);

			new_dm_state->context = old_dm_state->context;

			if (old_dm_state->context)
				dc_retain_state(old_dm_state->context);
		}
	}
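	/*
	 * Illustrative sketch (not driver code) of the fast-path context swap
	 * above: validation may have built a fresh dc_state in the new DM
	 * atomic state, but a fast update must commit against the context
	 * already in use, so the candidate is dropped and the live one is
	 * re-referenced:
	 *
	 *	dc_release_state(new_dm_state->context);	// discard the candidate
	 *	new_dm_state->context = old_dm_state->context;	// reuse the live context
	 *	dc_retain_state(old_dm_state->context);		// now referenced by both states
	 */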
	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = (int)overall_update_type;
	}

	/* Must be a success at this point */
	WARN_ON(ret);
	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
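/*
 * Illustrative note (not driver code): DP_DOWN_STREAM_PORT_COUNT (DPCD
 * address 0x007) carries more than the port count; bit 6 is
 * MSA_TIMING_PAR_IGNORED, which is what the helper above actually tests.
 * A sink that can ignore the MSA timing parameters can follow source-driven
 * timing changes, a prerequisite for FreeSync over DP:
 *
 *	dpcd_data = 0x41;				// example raw byte from the sink
 *	dpcd_data & DP_MSA_TIMING_PAR_IGNORED;		// 0x40 -> capable
 */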
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i;
	bool edid_check_required;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = dev->dev_private;
	bool freesync_capable = false;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	if (!edid) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	edid_check_required = false;
	if (!amdgpu_dm_connector->dc_sink) {
		DRM_ERROR("dc_sink is NULL, could not add the FreeSync module.\n");
		goto update;
	}
	if (!adev->dm.freesync_module)
		goto update;
	/*
	 * If the EDID is non-NULL, restrict FreeSync support to DP and eDP
	 * sinks only.
	 */
	if (edid) {
		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
		    || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}
	}
	if (edid_check_required == true && (edid->version > 1 ||
	    (edid->version == 1 && edid->revision > 1))) {
		for (i = 0; i < 4; i++) {

			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;
			range = &data->data.range;
			/*
			 * Check if the monitor has a continuous frequency
			 * mode descriptor.
			 */
			if (data->type != EDID_DETAIL_MONITOR_RANGE)
				continue;
			/*
			 * Check the range-limits flag only. If flags == 1 then
			 * no additional timing information is provided.
			 * Default GTF, GTF secondary curve and CVT are not
			 * supported.
			 */
			if (range->flags != 1)
				continue;

			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
			amdgpu_dm_connector->pixel_clock_mhz =
				range->pixel_clock_mhz * 10;
			break;
		}

		if (amdgpu_dm_connector->max_vfreq -
		    amdgpu_dm_connector->min_vfreq > 10) {

			freesync_capable = true;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

static void amdgpu_dm_set_psr_caps(struct dc_link *link)
{
	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];

	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
		return;
	if (link->type == dc_connection_none)
		return;
	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
				    dpcd_data, sizeof(dpcd_data))) {
		link->psr_feature_enabled = dpcd_data[0] ? true : false;
		DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
	}
}

/*
 * amdgpu_dm_link_setup_psr() - configure the PSR link
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
{
	struct dc_link *link = NULL;
	struct psr_config psr_config = {0};
	struct psr_context psr_context = {0};
	struct dc *dc = NULL;
	bool ret = false;

	if (stream == NULL)
		return false;

	link = stream->link;
	dc = link->ctx->dc;

	psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;

	if (psr_config.psr_version > 0) {
		psr_config.psr_exit_link_training_required = 0x1;
		psr_config.psr_frame_capture_indication_req = 0;
		psr_config.psr_rfb_setup_time = 0x37;
		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
		psr_config.allow_smu_optimizations = 0x0;

		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);

	}
	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);

	return ret;
}

/*
 * amdgpu_dm_psr_enable() - enable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
{
	struct dc_link *link = stream->link;
	struct dc_static_screen_events triggers = {0};

	DRM_DEBUG_DRIVER("Enabling psr...\n");

	triggers.cursor_update = true;
	triggers.overlay_update = true;
	triggers.surface_update = true;

	dc_stream_set_static_screen_events(link->ctx->dc,
					   &stream, 1,
					   &triggers);

	return dc_link_set_psr_allow_active(link, true, false);
}

/*
 * amdgpu_dm_psr_disable() - disable the PSR firmware
 * @stream: stream state
 *
 * Return: true on success
 */
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{

	DRM_DEBUG_DRIVER("Disabling psr...\n");

	return dc_link_set_psr_allow_active(stream->link, false, true);
}
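/*
 * Illustrative sketch (not driver code): the expected PSR call sequence,
 * tying the helpers above together. Capabilities are read once per link,
 * the link is configured per stream, and enable/disable then toggle the
 * firmware state at commit time:
 *
 *	amdgpu_dm_set_psr_caps(link);		// DPCD probe, sets psr_feature_enabled
 *	amdgpu_dm_link_setup_psr(stream);	// program psr_config/psr_context
 *	amdgpu_dm_psr_enable(stream);		// allow PSR to go active
 *	...
 *	amdgpu_dm_psr_disable(stream);		// exit PSR before the next modeset
 */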