/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>

#include "modules/inc/mod_freesync.h"

#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "i2caux_interface.h"

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL.
 * The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct amdgpu_plane *aplane,
				unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
};

static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
};

/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc_state->stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
		struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
				acrtc->base.state);

		if (acrtc_state->stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc_state->stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}
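/*
 * Layout of the two words packed by dm_crtc_get_scanoutpos() above,
 * matching the legacy register format the base driver still expects:
 *   *position - bits 15:0 hold the current vertical scanline,
 *               bits 31:16 the current horizontal position.
 *   *vbl      - bits 15:0 hold the line where vertical blank starts,
 *               bits 31:16 the line where it ends.
 */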
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	/*
	 * The following check is inherited from both functions where this
	 * one is used now. It still needs to be investigated why it could
	 * happen.
	 */
	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev->ddev->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
		return;
	}

	/* wakeup userspace */
	if (amdgpu_crtc->event) {
		/* Update to correct count/ts if racing with vblank irq */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);

		/* page flip completed. clean up */
		amdgpu_crtc->event = NULL;

	} else
		WARN_ON(1);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
			 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);

	drm_crtc_vblank_put(&amdgpu_crtc->base);
}
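/*
 * dm_pflip_high_irq() above is the completion half of the page flip path:
 * pflip_status is set to AMDGPU_FLIP_SUBMITTED elsewhere when a flip is
 * programmed, and this handler moves it back to AMDGPU_FLIP_NONE while
 * delivering the pending drm vblank event under event_lock.
 */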
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	uint8_t crtc_index = 0;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);

	if (acrtc)
		crtc_index = acrtc->crtc_id;

	drm_handle_vblank(adev->ddev, crtc_index);

	/* Guard against a failed CRTC lookup before touching acrtc->base */
	if (acrtc)
		amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

static void hotplug_notify_work_func(struct work_struct *work)
{
	struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
	struct drm_device *dev = dm->ddev;

	drm_kms_helper_hotplug_event(dev);
}

#if defined(CONFIG_DRM_AMD_DC_FBC)
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct amdgpu_device *adev)
{
	struct dm_comressor_info *compressor = &adev->dm.compressor;
	struct drm_connector *conn;
	struct drm_device *dev = adev->ddev;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (compressor->bo_ptr)
		return;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	/* For eDP connector find a mode requiring max size */
	list_for_each_entry(conn,
			    &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *aconn;

		aconn = to_amdgpu_dm_connector(conn);
		if (aconn->dc_link->connector_signal == SIGNAL_TYPE_EDP) {
			struct drm_display_mode *mode;

			list_for_each_entry(mode, &conn->modes, head) {
				if (max_size < mode->hdisplay * mode->vdisplay)
					max_size = mode->htotal * mode->vtotal;
			}
		}
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
#endif
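/*
 * Illustrative sizing for amdgpu_dm_fbc_init() above (example numbers,
 * not from the code): a 1920x1080 eDP mode with htotal = 2200 and
 * vtotal = 1125 gives max_size = 2,475,000 pixels, so the pinned VRAM
 * buffer is max_size * 4 = 9,900,000 bytes (~9.4 MiB), i.e. one 32bpp
 * frame covering the full timing.
 */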
/* Init display KMS
 *
 * Returns 0 on success
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	adev->dm.ddev = adev->ddev;
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));

	/* initialize DAL's lock (for SYNC context use) */
	spin_lock_init(&adev->dm.dal_lock);

	/* initialize DAL's mutex */
	mutex_init(&adev->dm.dal_mutex);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->rev_id;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	adev->dm.dal = NULL;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	if (amdgpu_dc_log)
		init_data.log_mask = DC_DEFAULT_LOG_MASK;
	else
		init_data.log_mask = DC_MIN_LOG_MASK;

	/*
	 * TODO debug why this doesn't work on Raven
	 */
	if (adev->flags & AMD_IS_APU &&
	    adev->asic_type >= CHIP_CARRIZO &&
	    adev->asic_type < CHIP_RAVEN)
		init_data.flags.gpu_vm_support = true;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -1;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	amdgpu_dm_destroy_drm_device(&adev->dm);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}
	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	return;
}

static int dm_sw_init(void *handle)
{
	return 0;
}

static int dm_sw_fini(void *handle)
{
	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int ret = 0;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector, aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
				/* don't leak connection_mutex on the error path */
				drm_modeset_unlock(&dev->mode_config.connection_mutex);
				return ret;
			}
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
	return ret;
}

static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

#if defined(CONFIG_DRM_AMD_DC_FBC)
	amdgpu_dm_fbc_init(adev);
#endif
	return detect_mst_link_for_all_connectors(adev->ddev);
}

static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    !aconnector->mst_port) {

			if (suspend)
				drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
			else
				drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
		}
	}

	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
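/*
 * S3 overview: dm_suspend() below tears down MST, caches the atomic state
 * with drm_atomic_helper_suspend() and puts DC into D3; dm_resume() only
 * brings DC back to D0, while the heavier amdgpu_dm_display_resume()
 * re-detects links and replays the cached atomic state once the rest of
 * the ASIC has resumed.
 */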
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	s3_handle_mst(adev->ddev, true);

	amdgpu_dm_irq_suspend(adev);

	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);

	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return ret;
}

static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc)
{
	uint32_t i;
	struct drm_connector_state *new_con_state;
	struct drm_connector *connector;
	struct drm_crtc *crtc_from_state;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		crtc_from_state = new_con_state->crtc;

		if (crtc_from_state == crtc)
			return to_amdgpu_dm_connector(connector);
	}

	return NULL;
}

static int dm_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;

	/* power on hardware */
	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

	return 0;
}

int amdgpu_dm_display_resume(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	struct dm_plane_state *dm_new_plane_state;

	int ret = 0;
	int i;

	/* program HPD filter */
	dc_resume(dm->dc);

	/* On resume we need to rewrite the MSTM control bits to enable MST */
	s3_handle_mst(ddev, false);

	/*
	 * early enable HPD Rx IRQ, should be done before set mode as short
	 * pulse interrupts are used for MST
	 */
	amdgpu_dm_irq_resume_early(adev);

	/* Do detection */
	list_for_each_entry(connector,
			    &ddev->mode_config.connector_list, head) {
		aconnector = to_amdgpu_dm_connector(connector);

		/*
		 * this is the case when traversing through already created
		 * MST connectors, should be skipped
		 */
		if (aconnector->mst_port)
			continue;

		mutex_lock(&aconnector->hpd_lock);
		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
			aconnector->fake_enable = false;

		aconnector->dc_sink = NULL;
		amdgpu_dm_update_connector_after_detect(aconnector);
		mutex_unlock(&aconnector->hpd_lock);
	}

	/* Force mode set in atomic commit */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
		new_crtc_state->active_changed = true;

	/*
	 * atomic_check is expected to create the dc states. We need to release
	 * them here, since they were duplicated as part of the suspend
	 * procedure.
	 */
	for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);

	adev->dm.cached_state = NULL;

	amdgpu_dm_irq_resume_late(adev);

	return ret;
}

static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};

const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};


static struct drm_atomic_state *
dm_atomic_state_alloc(struct drm_device *dev)
{
	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	if (drm_atomic_state_init(dev, &state->base) < 0)
		goto fail;

	return &state->base;

fail:
	kfree(state);
	return NULL;
}

static void
dm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);

	if (dm_state->context) {
		dc_release_state(dm_state->context);
		dm_state->context = NULL;
	}

	drm_atomic_state_default_clear(state);
}

static void
dm_atomic_state_alloc_free(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	drm_atomic_state_default_release(state);
	kfree(dm_state);
}

static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = amdgpu_dm_atomic_commit,
	.atomic_state_alloc = dm_atomic_state_alloc,
	.atomic_state_clear = dm_atomic_state_clear,
	.atomic_state_free = dm_atomic_state_alloc_free
};

static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
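/*
 * The three atomic_state hooks above implement the standard DRM pattern
 * for subclassing drm_atomic_state: dm_atomic_state embeds the base state
 * and carries a dc_state ("context") alongside it, so the
 * alloc/clear/free callbacks keep the DC copy's lifetime in sync with the
 * DRM core's state lifetime.
 */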
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	sink = aconnector->dc_link->local_sink;

	/*
	 * Edid mgmt: the connector gets its first update only in the
	 * mode_valid hook; after that the connector sink is set to either a
	 * fake or a physical sink, depending on the link status.
	 * Skip this path during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_remove_sink_from_freesync_module(
						connector);
				/*
				 * retain and release below are used to
				 * bump up the refcount for the sink because the
				 * link doesn't point to it anymore after
				 * disconnect, so on the next crtc-to-connector
				 * reshuffle by UMD we would otherwise get an
				 * unwanted dc_sink release
				 */
				if (aconnector->dc_sink != aconnector->dc_em_sink)
					dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			amdgpu_dm_add_sink_to_freesync_module(
						connector, aconnector->edid);
		} else {
			amdgpu_dm_remove_sink_from_freesync_module(connector);
			if (!aconnector->dc_sink)
				aconnector->dc_sink = aconnector->dc_em_sink;
			else if (aconnector->dc_sink != aconnector->dc_em_sink)
				dc_sink_retain(aconnector->dc_sink);
		}

		mutex_unlock(&dev->mode_config.mutex);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
		return;

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				 aconnector->connector_id);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_remove_sink_from_freesync_module(
							connector);

		aconnector->dc_sink = sink;
		if (sink->dc_edid.length == 0) {
			aconnector->edid = NULL;
		} else {
			aconnector->edid =
				(struct edid *) sink->dc_edid.raw_edid;

			drm_mode_connector_update_edid_property(connector,
					aconnector->edid);
		}
		amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);

	} else {
		amdgpu_dm_remove_sink_from_freesync_module(connector);
		drm_mode_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		aconnector->dc_sink = NULL;
	}

	mutex_unlock(&dev->mode_config.mutex);
}

static void handle_hpd_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;

	/*
	 * In case of failure or MST no need to update connector status or
	 * notify the OS, since (for the MST case) MST does this in its own
	 * context.
	 */
	mutex_lock(&aconnector->hpd_lock);

	if (aconnector->fake_enable)
		aconnector->fake_enable = false;

	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
		amdgpu_dm_update_connector_after_detect(aconnector);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);

		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
			drm_kms_helper_hotplug_event(dev);
	}
	mutex_unlock(&aconnector->hpd_lock);

}
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
	uint8_t dret;
	bool new_irq_handled = false;
	int dpcd_addr;
	int dpcd_bytes_to_read;

	const int max_process_count = 30;
	int process_count = 0;

	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
		/* DPCD 0x200 - 0x201 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT;
	} else {
		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
		dpcd_addr = DP_SINK_COUNT_ESI;
	}

	dret = drm_dp_dpcd_read(
		&aconnector->dm_dp_aux.aux,
		dpcd_addr,
		esi,
		dpcd_bytes_to_read);

	while (dret == dpcd_bytes_to_read &&
	       process_count < max_process_count) {
		uint8_t retry;
		dret = 0;

		process_count++;

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
		/* handle HPD short pulse irq */
		if (aconnector->mst_mgr.mst_state)
			drm_dp_mst_hpd_irq(
				&aconnector->mst_mgr,
				esi,
				&new_irq_handled);

		if (new_irq_handled) {
			/* ACK at DPCD to notify downstream */
			const int ack_dpcd_bytes_to_write =
				dpcd_bytes_to_read - 1;

			for (retry = 0; retry < 3; retry++) {
				uint8_t wret;

				wret = drm_dp_dpcd_write(
					&aconnector->dm_dp_aux.aux,
					dpcd_addr + 1,
					&esi[1],
					ack_dpcd_bytes_to_write);
				if (wret == ack_dpcd_bytes_to_write)
					break;
			}

			/* check if there is a new irq to be handled */
			dret = drm_dp_dpcd_read(
				&aconnector->dm_dp_aux.aux,
				dpcd_addr,
				esi,
				dpcd_bytes_to_read);

			new_irq_handled = false;
		} else {
			break;
		}
	}

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
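/*
 * The loop in dm_handle_hpd_rx_irq() above follows the DP MST rule that
 * the sink keeps its ESI bytes asserted until the source ACKs them: read
 * the ESI block, let the MST manager decode it, write the ACK back
 * (dpcd_addr + 1 skips the sink-count byte), then re-read until no new
 * IRQ is reported or the 30-iteration safety cap trips.
 */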
static void handle_hpd_rx_irq(void *param)
{
	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_link *dc_link = aconnector->dc_link;
	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;

	/*
	 * TODO: Temporary mutex so the hpd interrupt does not race on the
	 * gpio; once an i2c helper is implemented, this mutex should be
	 * retired.
	 */
	if (dc_link->type != dc_connection_mst_branch)
		mutex_lock(&aconnector->hpd_lock);

	if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
			amdgpu_dm_update_connector_after_detect(aconnector);

			drm_modeset_lock_all(dev);
			dm_restore_drm_connector_state(dev, connector);
			drm_modeset_unlock_all(dev);

			drm_kms_helper_hotplug_event(dev);
		}
	}
	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
	    (dc_link->type == dc_connection_mst_branch))
		dm_handle_hpd_rx_irq(aconnector);

	if (dc_link->type != dc_connection_mst_branch)
		mutex_unlock(&aconnector->hpd_lock);
}

static void register_hpd_handlers(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;
	struct amdgpu_dm_connector *aconnector;
	const struct dc_link *dc_link;
	struct dc_interrupt_params int_params = {0};

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	list_for_each_entry(connector,
			&dev->mode_config.connector_list, head) {

		aconnector = to_amdgpu_dm_connector(connector);
		dc_link = aconnector->dc_link;

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_irq,
					(void *) aconnector);
		}

		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {

			/* Also register for DP short pulse (hpd_rx). */
			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
			int_params.irq_source = dc_link->irq_source_hpd_rx;

			amdgpu_dm_irq_register_interrupt(adev, &int_params,
					handle_hpd_rx_irq,
					(void *) aconnector);
		}
	}
}
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_RAVEN)
		client_id = AMDGPU_IH_CLIENTID_DCE;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VBLANK interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
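/*
 * Note the i += 2 stride in the GRPH_PFLIP loop above: on VI the
 * per-display page flip source IDs are spaced two apart in the IV table,
 * unlike the contiguous vertical-interrupt IDs walked before it.
 */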
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);

	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif

static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
	int r;

	adev->mode_info.mode_config_initialized = true;

	adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
	adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;

	adev->ddev->mode_config.max_width = 16384;
	adev->ddev->mode_config.max_height = 16384;

	adev->ddev->mode_config.preferred_depth = 24;
	adev->ddev->mode_config.prefer_shadow = 1;
	/* indicate support of immediate flip */
	adev->ddev->mode_config.async_page_flip = true;

	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;

	r = amdgpu_modeset_create_props(adev);
	if (r)
		return r;

	return 0;
}

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
	struct amdgpu_display_manager *dm = bl_get_data(bd);

	if (dc_link_set_backlight_level(dm->backlight_link,
			bd->props.brightness, 0, 0))
		return 0;
	else
		return 1;
}

static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status = amdgpu_dm_backlight_update_status,
};
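/*
 * Note on the return values in amdgpu_dm_backlight_update_status() above:
 * the backlight class expects 0 on success, while
 * dc_link_set_backlight_level() returns true on success, hence the
 * true -> 0 / false -> 1 translation.
 */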
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
	char bl_name[16];
	struct backlight_properties props = { 0 };

	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
	props.type = BACKLIGHT_RAW;

	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
			dm->adev->ddev->primary->index);

	dm->backlight_dev = backlight_device_register(bl_name,
			dm->adev->ddev->dev,
			dm,
			&amdgpu_dm_backlight_ops,
			&props);

	if (IS_ERR(dm->backlight_dev))
		DRM_ERROR("DM: Backlight registration failed!\n");
	else
		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}

#endif

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with DAL component
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	uint32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	unsigned long possible_crtcs;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -1;
	}

	for (i = 0; i < dm->dc->caps.max_planes; i++) {
		struct amdgpu_plane *plane;

		plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
		mode_info->planes[i] = plane;

		if (!plane) {
			DRM_ERROR("KMS: Failed to allocate plane\n");
			goto fail;
		}
		plane->base.type = mode_info->plane_type[i];

		/*
		 * HACK: IGT tests expect that each plane can only have
		 * one possible CRTC. For now, set one CRTC for each
		 * plane that is not an underlay, but still allow multiple
		 * CRTCs for underlay planes.
		 */
		possible_crtcs = 1 << i;
		if (i >= dm->dc->caps.max_streams)
			possible_crtcs = 0xff;

		if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
			DRM_ERROR("KMS: Failed to initialize plane\n");
			goto fail;
		}
	}

	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	dm->display_indexes_num = dm->dc->caps.max_streams;

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
				DETECT_REASON_BOOT))
			amdgpu_dm_update_connector_after_detect(aconnector);
	}
	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGA10:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		/*
		 * Temporary disable until pplib/smu interaction is implemented
		 */
		dm->dc->debug.disable_stutter = true;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	kfree(aencoder);
	kfree(aconnector);
	for (i = 0; i < dm->dc->caps.max_planes; i++)
		kfree(mode_info->planes[i]);
	return -1;
}

static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
	drm_mode_config_cleanup(dm->ddev);
	return;
}

/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}

static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
				   u8 level)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
}

static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
	/* TODO: translate amdgpu_encoder to display_index and call DAL */
	return 0;
}

static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
{
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	uint8_t i;

	struct amdgpu_device *adev = dev->dev_private;
	int r = 0;

	/* Get freesync enable flag from DRM */

	num_streams = dc_get_current_stream_count(adev->dm.dc);

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream;
		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
	}

	return r;
}

static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,

};
#if defined(CONFIG_DEBUG_KERNEL_DC)

static ssize_t s3_debug_store(struct device *device,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t count)
{
	int ret;
	int s3_state;
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	ret = kstrtoint(buf, 0, &s3_state);

	if (ret == 0) {
		if (s3_state) {
			dm_resume(adev);
			amdgpu_dm_display_resume(adev);
			drm_kms_helper_hotplug_event(adev->ddev);
		} else
			dm_suspend(adev);
	}

	return ret == 0 ? count : 0;
}

DEVICE_ATTR_WO(s3_debug);

#endif
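/*
 * Usage sketch for the attribute above (debug kernels only; the exact
 * sysfs path depends on the GPU's PCI address): writing 0 to
 * /sys/bus/pci/devices/<bdf>/s3_debug suspends the display, and writing a
 * non-zero value resumes it and fires a hotplug event, emulating an S3
 * cycle without a full system suspend.
 */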
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->ddev->driver->driver_features |= DRIVER_ATOMIC;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_plane_type_carizzo;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		adev->mode_info.plane_type = dm_plane_type_stoney;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_POLARIS10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
	case CHIP_VEGA10:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		adev->mode_info.plane_type = dm_plane_type_default;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev->ddev->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	if (!crtc_state->enable)
		return false;

	return crtc_state->active;
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	if (!drm_atomic_crtc_needs_modeset(crtc_state))
		return false;

	return !crtc_state->enable || !crtc_state->active;
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
					struct dc_plane_state *plane_state)
{
	plane_state->src_rect.x = state->src_x >> 16;
	plane_state->src_rect.y = state->src_y >> 16;
	/* we ignore the mantissa for now and do not deal with floating pixels :( */
	plane_state->src_rect.width = state->src_w >> 16;

	if (plane_state->src_rect.width == 0)
		return false;

	plane_state->src_rect.height = state->src_h >> 16;
	if (plane_state->src_rect.height == 0)
		return false;

	plane_state->dst_rect.x = state->crtc_x;
	plane_state->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return false;

	plane_state->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return false;

	plane_state->dst_rect.height = state->crtc_h;

	plane_state->clip_rect = plane_state->dst_rect;

	switch (state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_state->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_state->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_state->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_state->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_state->rotation = ROTATION_ANGLE_0;
		break;
	}

	return true;
}
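/*
 * The >> 16 shifts in fill_rects_from_plane_state() above drop the
 * fractional part of DRM's 16.16 fixed-point source coordinates: e.g.
 * src_w = 0x02800000 (640.0 in 16.16) becomes a 640 pixel wide src_rect.
 */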
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
	int r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show the error message when the reserve was merely
		 * interrupted (-ERESTARTSYS) */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
					 struct dc_plane_state *plane_state,
					 const struct amdgpu_framebuffer *amdgpu_fb)
{
	uint64_t tiling_flags;
	unsigned int awidth;
	const struct drm_framebuffer *fb = &amdgpu_fb->base;
	int ret = 0;
	struct drm_format_name_buf format_name;

	ret = get_fb_info(
		amdgpu_fb,
		&tiling_flags);

	if (ret)
		return ret;

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_NV21:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
		plane_state->plane_size.grph.surface_size.x = 0;
		plane_state->plane_size.grph.surface_size.y = 0;
		plane_state->plane_size.grph.surface_size.width = fb->width;
		plane_state->plane_size.grph.surface_size.height = fb->height;
		plane_state->plane_size.grph.surface_pitch =
				fb->pitches[0] / fb->format->cpp[0];
		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_SRGB;

	} else {
		awidth = ALIGN(fb->width, 64);
		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		plane_state->plane_size.video.luma_size.x = 0;
		plane_state->plane_size.video.luma_size.y = 0;
		plane_state->plane_size.video.luma_size.width = awidth;
		plane_state->plane_size.video.luma_size.height = fb->height;
		/* TODO: unhardcode */
		plane_state->plane_size.video.luma_pitch = awidth;

		plane_state->plane_size.video.chroma_size.x = 0;
		plane_state->plane_size.video.chroma_size.y = 0;
		plane_state->plane_size.video.chroma_size.width = awidth;
		plane_state->plane_size.video.chroma_size.height = fb->height;
		plane_state->plane_size.video.chroma_pitch = awidth / 2;

		/* TODO: unhardcode */
		plane_state->color_space = COLOR_SPACE_YCBCR709;
	}
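	/*
	 * Pitch alignment example for the video path above: ALIGN(w, 64)
	 * rounds the surface width up to the next multiple of 64, so a
	 * 1280-wide NV12 buffer keeps awidth = 1280 while a 1366-wide one
	 * is padded to awidth = 1408.
	 */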
AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 1909 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 1910 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 1911 1912 /* XXX fix me for VI */ 1913 plane_state->tiling_info.gfx8.num_banks = num_banks; 1914 plane_state->tiling_info.gfx8.array_mode = 1915 DC_ARRAY_2D_TILED_THIN1; 1916 plane_state->tiling_info.gfx8.tile_split = tile_split; 1917 plane_state->tiling_info.gfx8.bank_width = bankw; 1918 plane_state->tiling_info.gfx8.bank_height = bankh; 1919 plane_state->tiling_info.gfx8.tile_aspect = mtaspect; 1920 plane_state->tiling_info.gfx8.tile_mode = 1921 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 1922 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 1923 == DC_ARRAY_1D_TILED_THIN1) { 1924 plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 1925 } 1926 1927 plane_state->tiling_info.gfx8.pipe_config = 1928 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1929 1930 if (adev->asic_type == CHIP_VEGA10 || 1931 adev->asic_type == CHIP_RAVEN) { 1932 /* Fill GFX9 params */ 1933 plane_state->tiling_info.gfx9.num_pipes = 1934 adev->gfx.config.gb_addr_config_fields.num_pipes; 1935 plane_state->tiling_info.gfx9.num_banks = 1936 adev->gfx.config.gb_addr_config_fields.num_banks; 1937 plane_state->tiling_info.gfx9.pipe_interleave = 1938 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 1939 plane_state->tiling_info.gfx9.num_shader_engines = 1940 adev->gfx.config.gb_addr_config_fields.num_se; 1941 plane_state->tiling_info.gfx9.max_compressed_frags = 1942 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 1943 plane_state->tiling_info.gfx9.num_rb_per_se = 1944 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 1945 plane_state->tiling_info.gfx9.swizzle = 1946 AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); 1947 plane_state->tiling_info.gfx9.shaderEnable = 1; 1948 } 1949 1950 plane_state->visible = true; 1951 plane_state->scaling_quality.h_taps_c = 0; 1952 plane_state->scaling_quality.v_taps_c = 0; 1953 1954 /* is this needed? is plane_state zeroed at allocation? 
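 * (it appears so: dc_create_plane_state() zero-allocates its state, so these
 * writes look redundant and are kept only as documentation of the defaults)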
*/ 1955 plane_state->scaling_quality.h_taps = 0; 1956 plane_state->scaling_quality.v_taps = 0; 1957 plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE; 1958 1959 return ret; 1960 1961 } 1962 1963 static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state, 1964 struct dc_plane_state *plane_state) 1965 { 1966 int i; 1967 struct dc_gamma *gamma; 1968 struct drm_color_lut *lut = 1969 (struct drm_color_lut *) crtc_state->gamma_lut->data; 1970 1971 gamma = dc_create_gamma(); 1972 1973 if (gamma == NULL) { 1974 WARN_ON(1); 1975 return; 1976 } 1977 1978 gamma->type = GAMMA_RGB_256; 1979 gamma->num_entries = GAMMA_RGB_256_ENTRIES; 1980 for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) { 1981 gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red); 1982 gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green); 1983 gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue); 1984 } 1985 1986 plane_state->gamma_correction = gamma; 1987 } 1988 1989 static int fill_plane_attributes(struct amdgpu_device *adev, 1990 struct dc_plane_state *dc_plane_state, 1991 struct drm_plane_state *plane_state, 1992 struct drm_crtc_state *crtc_state) 1993 { 1994 const struct amdgpu_framebuffer *amdgpu_fb = 1995 to_amdgpu_framebuffer(plane_state->fb); 1996 const struct drm_crtc *crtc = plane_state->crtc; 1997 struct dc_transfer_func *input_tf; 1998 int ret = 0; 1999 2000 if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) 2001 return -EINVAL; 2002 2003 ret = fill_plane_attributes_from_fb( 2004 crtc->dev->dev_private, 2005 dc_plane_state, 2006 amdgpu_fb); 2007 2008 if (ret) 2009 return ret; 2010 2011 input_tf = dc_create_transfer_func(); 2012 2013 if (input_tf == NULL) 2014 return -ENOMEM; 2015 2016 input_tf->type = TF_TYPE_PREDEFINED; 2017 input_tf->tf = TRANSFER_FUNCTION_SRGB; 2018 2019 dc_plane_state->in_transfer_func = input_tf; 2020 2021 /* In case of gamma set, update gamma value */ 2022 if (crtc_state->gamma_lut) 2023 fill_gamma_from_crtc_state(crtc_state, dc_plane_state); 2024 2025 return ret; 2026 } 2027 2028 /*****************************************************************************/ 2029 2030 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 2031 const struct dm_connector_state *dm_state, 2032 struct dc_stream_state *stream) 2033 { 2034 enum amdgpu_rmx_type rmx_type; 2035 2036 struct rect src = { 0 }; /* viewport in composition space*/ 2037 struct rect dst = { 0 }; /* stream addressable area */ 2038 2039 /* no mode. 
nothing to be done */ 2040 if (!mode) 2041 return; 2042 2043 /* Full screen scaling by default */ 2044 src.width = mode->hdisplay; 2045 src.height = mode->vdisplay; 2046 dst.width = stream->timing.h_addressable; 2047 dst.height = stream->timing.v_addressable; 2048 2049 if (dm_state) { 2050 rmx_type = dm_state->scaling; 2051 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 2052 if (src.width * dst.height < 2053 src.height * dst.width) { 2054 /* height needs less upscaling/more downscaling */ 2055 dst.width = src.width * 2056 dst.height / src.height; 2057 } else { 2058 /* width needs less upscaling/more downscaling */ 2059 dst.height = src.height * 2060 dst.width / src.width; 2061 } 2062 } else if (rmx_type == RMX_CENTER) { 2063 dst = src; 2064 } 2065 2066 dst.x = (stream->timing.h_addressable - dst.width) / 2; 2067 dst.y = (stream->timing.v_addressable - dst.height) / 2; 2068 2069 if (dm_state->underscan_enable) { 2070 dst.x += dm_state->underscan_hborder / 2; 2071 dst.y += dm_state->underscan_vborder / 2; 2072 dst.width -= dm_state->underscan_hborder; 2073 dst.height -= dm_state->underscan_vborder; 2074 } 2075 } 2076 2077 stream->src = src; 2078 stream->dst = dst; 2079 2080 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n", 2081 dst.x, dst.y, dst.width, dst.height); 2082 2083 } 2084 2085 static enum dc_color_depth 2086 convert_color_depth_from_display_info(const struct drm_connector *connector) 2087 { 2088 uint32_t bpc = connector->display_info.bpc; 2089 2090 /* Limited color depth to 8bit 2091 * TODO: Still need to handle deep color 2092 */ 2093 if (bpc > 8) 2094 bpc = 8; 2095 2096 switch (bpc) { 2097 case 0: 2098 /* Temporary Work around, DRM don't parse color depth for 2099 * EDID revision before 1.4 2100 * TODO: Fix edid parsing 2101 */ 2102 return COLOR_DEPTH_888; 2103 case 6: 2104 return COLOR_DEPTH_666; 2105 case 8: 2106 return COLOR_DEPTH_888; 2107 case 10: 2108 return COLOR_DEPTH_101010; 2109 case 12: 2110 return COLOR_DEPTH_121212; 2111 case 14: 2112 return COLOR_DEPTH_141414; 2113 case 16: 2114 return COLOR_DEPTH_161616; 2115 default: 2116 return COLOR_DEPTH_UNDEFINED; 2117 } 2118 } 2119 2120 static enum dc_aspect_ratio 2121 get_aspect_ratio(const struct drm_display_mode *mode_in) 2122 { 2123 int32_t width = mode_in->crtc_hdisplay * 9; 2124 int32_t height = mode_in->crtc_vdisplay * 16; 2125 2126 if ((width - height) < 10 && (width - height) > -10) 2127 return ASPECT_RATIO_16_9; 2128 else 2129 return ASPECT_RATIO_4_3; 2130 } 2131 2132 static enum dc_color_space 2133 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) 2134 { 2135 enum dc_color_space color_space = COLOR_SPACE_SRGB; 2136 2137 switch (dc_crtc_timing->pixel_encoding) { 2138 case PIXEL_ENCODING_YCBCR422: 2139 case PIXEL_ENCODING_YCBCR444: 2140 case PIXEL_ENCODING_YCBCR420: 2141 { 2142 /* 2143 * 27030khz is the separation point between HDTV and SDTV 2144 * according to HDMI spec, we use YCbCr709 and YCbCr601 2145 * respectively 2146 */ 2147 if (dc_crtc_timing->pix_clk_khz > 27030) { 2148 if (dc_crtc_timing->flags.Y_ONLY) 2149 color_space = 2150 COLOR_SPACE_YCBCR709_LIMITED; 2151 else 2152 color_space = COLOR_SPACE_YCBCR709; 2153 } else { 2154 if (dc_crtc_timing->flags.Y_ONLY) 2155 color_space = 2156 COLOR_SPACE_YCBCR601_LIMITED; 2157 else 2158 color_space = COLOR_SPACE_YCBCR601; 2159 } 2160 2161 } 2162 break; 2163 case PIXEL_ENCODING_RGB: 2164 color_space = COLOR_SPACE_SRGB; 2165 break; 2166 2167 default: 2168 WARN_ON(1); 2169 break; 2170 } 2171 2172 return color_space; 2173 } 
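
/*
 * Worked example for the helper above (illustrative only, not driver code):
 * the 27030 kHz pixel-clock threshold separates SDTV from HDTV timings, so a
 * 720x480@60 stream at 27000 kHz is assigned BT.601 while a 1280x720@60
 * stream at 74250 kHz is assigned BT.709. The helper name below is
 * hypothetical and only mirrors the comparison in get_output_color_space().
 */
#if 0
static const char *ycbcr_standard_for_pix_clk(int pix_clk_khz)
{
	/* e.g. 27000 -> "BT.601 (SDTV)", 74250 -> "BT.709 (HDTV)" */
	return pix_clk_khz > 27030 ? "BT.709 (HDTV)" : "BT.601 (SDTV)";
}
#endif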
2174 2175 /*****************************************************************************/ 2176 2177 static void 2178 fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, 2179 const struct drm_display_mode *mode_in, 2180 const struct drm_connector *connector) 2181 { 2182 struct dc_crtc_timing *timing_out = &stream->timing; 2183 struct dc_transfer_func *tf = dc_create_transfer_func(); 2184 2185 memset(timing_out, 0, sizeof(struct dc_crtc_timing)); 2186 2187 timing_out->h_border_left = 0; 2188 timing_out->h_border_right = 0; 2189 timing_out->v_border_top = 0; 2190 timing_out->v_border_bottom = 0; 2191 /* TODO: un-hardcode */ 2192 2193 if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 2194 && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) 2195 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 2196 else 2197 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 2198 2199 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 2200 timing_out->display_color_depth = convert_color_depth_from_display_info( 2201 connector); 2202 timing_out->scan_type = SCANNING_TYPE_NODATA; 2203 timing_out->hdmi_vic = 0; 2204 timing_out->vic = drm_match_cea_mode(mode_in); 2205 2206 timing_out->h_addressable = mode_in->crtc_hdisplay; 2207 timing_out->h_total = mode_in->crtc_htotal; 2208 timing_out->h_sync_width = 2209 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 2210 timing_out->h_front_porch = 2211 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 2212 timing_out->v_total = mode_in->crtc_vtotal; 2213 timing_out->v_addressable = mode_in->crtc_vdisplay; 2214 timing_out->v_front_porch = 2215 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 2216 timing_out->v_sync_width = 2217 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 2218 timing_out->pix_clk_khz = mode_in->crtc_clock; 2219 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 2220 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 2221 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 2222 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 2223 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 2224 2225 stream->output_color_space = get_output_color_space(timing_out); 2226 2227 tf->type = TF_TYPE_PREDEFINED; 2228 tf->tf = TRANSFER_FUNCTION_SRGB; 2229 stream->out_transfer_func = tf; 2230 } 2231 2232 static void fill_audio_info(struct audio_info *audio_info, 2233 const struct drm_connector *drm_connector, 2234 const struct dc_sink *dc_sink) 2235 { 2236 int i = 0; 2237 int cea_revision = 0; 2238 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 2239 2240 audio_info->manufacture_id = edid_caps->manufacturer_id; 2241 audio_info->product_id = edid_caps->product_id; 2242 2243 cea_revision = drm_connector->display_info.cea_rev; 2244 2245 strncpy(audio_info->display_name, 2246 edid_caps->display_name, 2247 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1); 2248 2249 if (cea_revision >= 3) { 2250 audio_info->mode_count = edid_caps->audio_mode_count; 2251 2252 for (i = 0; i < audio_info->mode_count; ++i) { 2253 audio_info->modes[i].format_code = 2254 (enum audio_format_code) 2255 (edid_caps->audio_modes[i].format_code); 2256 audio_info->modes[i].channel_count = 2257 edid_caps->audio_modes[i].channel_count; 2258 audio_info->modes[i].sample_rates.all = 2259 edid_caps->audio_modes[i].sample_rate; 2260 audio_info->modes[i].sample_size = 2261 edid_caps->audio_modes[i].sample_size; 2262 } 2263 } 2264 2265 audio_info->flags.all = edid_caps->speaker_flags; 2266 2267 /* TODO: We only check for the progressive mode, check for interlace 
mode too */ 2268 if (drm_connector->latency_present[0]) { 2269 audio_info->video_latency = drm_connector->video_latency[0]; 2270 audio_info->audio_latency = drm_connector->audio_latency[0]; 2271 } 2272 2273 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 2274 2275 } 2276 2277 static void 2278 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 2279 struct drm_display_mode *dst_mode) 2280 { 2281 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 2282 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 2283 dst_mode->crtc_clock = src_mode->crtc_clock; 2284 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 2285 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 2286 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 2287 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 2288 dst_mode->crtc_htotal = src_mode->crtc_htotal; 2289 dst_mode->crtc_hskew = src_mode->crtc_hskew; 2290 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 2291 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 2292 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 2293 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 2294 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 2295 } 2296 2297 static void 2298 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 2299 const struct drm_display_mode *native_mode, 2300 bool scale_enabled) 2301 { 2302 if (scale_enabled) { 2303 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 2304 } else if (native_mode->clock == drm_mode->clock && 2305 native_mode->htotal == drm_mode->htotal && 2306 native_mode->vtotal == drm_mode->vtotal) { 2307 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 2308 } else { 2309 /* no scaling nor amdgpu inserted, no need to patch */ 2310 } 2311 } 2312 2313 static int create_fake_sink(struct amdgpu_dm_connector *aconnector) 2314 { 2315 struct dc_sink *sink = NULL; 2316 struct dc_sink_init_data sink_init_data = { 0 }; 2317 2318 sink_init_data.link = aconnector->dc_link; 2319 sink_init_data.sink_signal = aconnector->dc_link->connector_signal; 2320 2321 sink = dc_sink_create(&sink_init_data); 2322 if (!sink) { 2323 DRM_ERROR("Failed to create sink!\n"); 2324 return -ENOMEM; 2325 } 2326 2327 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 2328 aconnector->fake_enable = true; 2329 2330 aconnector->dc_sink = sink; 2331 aconnector->dc_link->local_sink = sink; 2332 2333 return 0; 2334 } 2335 2336 static void set_multisync_trigger_params( 2337 struct dc_stream_state *stream) 2338 { 2339 if (stream->triggered_crtc_reset.enabled) { 2340 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING; 2341 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE; 2342 } 2343 } 2344 2345 static void set_master_stream(struct dc_stream_state *stream_set[], 2346 int stream_count) 2347 { 2348 int j, highest_rfr = 0, master_stream = 0; 2349 2350 for (j = 0; j < stream_count; j++) { 2351 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 2352 int refresh_rate = 0; 2353 2354 refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/ 2355 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); 2356 if (refresh_rate > highest_rfr) { 2357 highest_rfr = refresh_rate; 2358 master_stream = j; 2359 } 2360 } 2361 } 2362 for (j = 0; j < stream_count; j++) { 2363 if (stream_set[j]) 2364 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 2365 } 2366 } 2367 2368 static void 
dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/* TODO: add a function that reads the AMD VSDB bits and sets
		 * the crtc_sync_master.multi_sync_enabled flag accordingly.
		 * For now it is set to false.
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		/*
		 * Create the dc_sink only when necessary, i.e. for MST;
		 * a fake_sink must never be applied to an MST connector.
		 */
		if (aconnector->mst_port) {
			dm_dp_mst_dc_sink_create(drm_connector);
			return stream;
		}

		if (create_fake_sink(aconnector))
			return stream;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		return stream;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	if (preferred_mode == NULL) {
		/* This may not be an error: the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the modelist may not be filled in in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ?
(dm_state->scaling != RMX_OFF) : false);
	}

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		aconnector->dc_sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	crtc->state = &state->base;
	crtc->state->crtc = crtc;
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	/* TODO: duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

/* Only the options currently available to the driver are implemented */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is* called in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	return (connected ?
connector_status_connected : 2557 connector_status_disconnected); 2558 } 2559 2560 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 2561 struct drm_connector_state *connector_state, 2562 struct drm_property *property, 2563 uint64_t val) 2564 { 2565 struct drm_device *dev = connector->dev; 2566 struct amdgpu_device *adev = dev->dev_private; 2567 struct dm_connector_state *dm_old_state = 2568 to_dm_connector_state(connector->state); 2569 struct dm_connector_state *dm_new_state = 2570 to_dm_connector_state(connector_state); 2571 2572 int ret = -EINVAL; 2573 2574 if (property == dev->mode_config.scaling_mode_property) { 2575 enum amdgpu_rmx_type rmx_type; 2576 2577 switch (val) { 2578 case DRM_MODE_SCALE_CENTER: 2579 rmx_type = RMX_CENTER; 2580 break; 2581 case DRM_MODE_SCALE_ASPECT: 2582 rmx_type = RMX_ASPECT; 2583 break; 2584 case DRM_MODE_SCALE_FULLSCREEN: 2585 rmx_type = RMX_FULL; 2586 break; 2587 case DRM_MODE_SCALE_NONE: 2588 default: 2589 rmx_type = RMX_OFF; 2590 break; 2591 } 2592 2593 if (dm_old_state->scaling == rmx_type) 2594 return 0; 2595 2596 dm_new_state->scaling = rmx_type; 2597 ret = 0; 2598 } else if (property == adev->mode_info.underscan_hborder_property) { 2599 dm_new_state->underscan_hborder = val; 2600 ret = 0; 2601 } else if (property == adev->mode_info.underscan_vborder_property) { 2602 dm_new_state->underscan_vborder = val; 2603 ret = 0; 2604 } else if (property == adev->mode_info.underscan_property) { 2605 dm_new_state->underscan_enable = val; 2606 ret = 0; 2607 } 2608 2609 return ret; 2610 } 2611 2612 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 2613 const struct drm_connector_state *state, 2614 struct drm_property *property, 2615 uint64_t *val) 2616 { 2617 struct drm_device *dev = connector->dev; 2618 struct amdgpu_device *adev = dev->dev_private; 2619 struct dm_connector_state *dm_state = 2620 to_dm_connector_state(state); 2621 int ret = -EINVAL; 2622 2623 if (property == dev->mode_config.scaling_mode_property) { 2624 switch (dm_state->scaling) { 2625 case RMX_CENTER: 2626 *val = DRM_MODE_SCALE_CENTER; 2627 break; 2628 case RMX_ASPECT: 2629 *val = DRM_MODE_SCALE_ASPECT; 2630 break; 2631 case RMX_FULL: 2632 *val = DRM_MODE_SCALE_FULLSCREEN; 2633 break; 2634 case RMX_OFF: 2635 default: 2636 *val = DRM_MODE_SCALE_NONE; 2637 break; 2638 } 2639 ret = 0; 2640 } else if (property == adev->mode_info.underscan_hborder_property) { 2641 *val = dm_state->underscan_hborder; 2642 ret = 0; 2643 } else if (property == adev->mode_info.underscan_vborder_property) { 2644 *val = dm_state->underscan_vborder; 2645 ret = 0; 2646 } else if (property == adev->mode_info.underscan_property) { 2647 *val = dm_state->underscan_enable; 2648 ret = 0; 2649 } 2650 return ret; 2651 } 2652 2653 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 2654 { 2655 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 2656 const struct dc_link *link = aconnector->dc_link; 2657 struct amdgpu_device *adev = connector->dev->dev_private; 2658 struct amdgpu_display_manager *dm = &adev->dm; 2659 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 2660 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 2661 2662 if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { 2663 amdgpu_dm_register_backlight_device(dm); 2664 2665 if (dm->backlight_dev) { 2666 backlight_device_unregister(dm->backlight_dev); 2667 dm->backlight_dev = NULL; 2668 } 2669 2670 } 2671 #endif 2672 drm_connector_unregister(connector); 2673 
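	/* unregister first so the userspace interfaces (sysfs and friends)
	 * are gone before the core connector state is freed below */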
drm_connector_cleanup(connector); 2674 kfree(connector); 2675 } 2676 2677 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 2678 { 2679 struct dm_connector_state *state = 2680 to_dm_connector_state(connector->state); 2681 2682 kfree(state); 2683 2684 state = kzalloc(sizeof(*state), GFP_KERNEL); 2685 2686 if (state) { 2687 state->scaling = RMX_OFF; 2688 state->underscan_enable = false; 2689 state->underscan_hborder = 0; 2690 state->underscan_vborder = 0; 2691 2692 connector->state = &state->base; 2693 connector->state->connector = connector; 2694 } 2695 } 2696 2697 struct drm_connector_state * 2698 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 2699 { 2700 struct dm_connector_state *state = 2701 to_dm_connector_state(connector->state); 2702 2703 struct dm_connector_state *new_state = 2704 kmemdup(state, sizeof(*state), GFP_KERNEL); 2705 2706 if (new_state) { 2707 __drm_atomic_helper_connector_duplicate_state(connector, 2708 &new_state->base); 2709 return &new_state->base; 2710 } 2711 2712 return NULL; 2713 } 2714 2715 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 2716 .reset = amdgpu_dm_connector_funcs_reset, 2717 .detect = amdgpu_dm_connector_detect, 2718 .fill_modes = drm_helper_probe_single_connector_modes, 2719 .destroy = amdgpu_dm_connector_destroy, 2720 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 2721 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2722 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 2723 .atomic_get_property = amdgpu_dm_connector_atomic_get_property 2724 }; 2725 2726 static struct drm_encoder *best_encoder(struct drm_connector *connector) 2727 { 2728 int enc_id = connector->encoder_ids[0]; 2729 struct drm_mode_object *obj; 2730 struct drm_encoder *encoder; 2731 2732 DRM_DEBUG_DRIVER("Finding the best encoder\n"); 2733 2734 /* pick the encoder ids */ 2735 if (enc_id) { 2736 obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER); 2737 if (!obj) { 2738 DRM_ERROR("Couldn't find a matching encoder for our connector\n"); 2739 return NULL; 2740 } 2741 encoder = obj_to_encoder(obj); 2742 return encoder; 2743 } 2744 DRM_ERROR("No encoder id\n"); 2745 return NULL; 2746 } 2747 2748 static int get_modes(struct drm_connector *connector) 2749 { 2750 return amdgpu_dm_connector_get_modes(connector); 2751 } 2752 2753 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 2754 { 2755 struct dc_sink_init_data init_params = { 2756 .link = aconnector->dc_link, 2757 .sink_signal = SIGNAL_TYPE_VIRTUAL 2758 }; 2759 struct edid *edid; 2760 2761 if (!aconnector->base.edid_blob_ptr) { 2762 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", 2763 aconnector->base.name); 2764 2765 aconnector->base.force = DRM_FORCE_OFF; 2766 aconnector->base.override_edid = false; 2767 return; 2768 } 2769 2770 edid = (struct edid *) aconnector->base.edid_blob_ptr->data; 2771 2772 aconnector->edid = edid; 2773 2774 aconnector->dc_em_sink = dc_link_add_remote_sink( 2775 aconnector->dc_link, 2776 (uint8_t *)edid, 2777 (edid->extensions + 1) * EDID_LENGTH, 2778 &init_params); 2779 2780 if (aconnector->base.force == DRM_FORCE_ON) 2781 aconnector->dc_sink = aconnector->dc_link->local_sink ? 
aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/* In the case of a headless boot with force-on for a DP managed
	 * connector, these settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	struct amdgpu_device *adev = connector->dev->dev_private;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	enum dc_status dc_result = DC_OK;

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/* Only run this the first time mode_valid is called, to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_stream_for_sink(aconnector, mode, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);
	fill_stream_properties_from_drm_display_mode(stream, mode, connector);

	stream->src.width = mode->hdisplay;
	stream->src.height = mode->vdisplay;
	stream->dst = stream->src;

	dc_result = dc_validate_stream(adev->dm.dc, stream);

	if (dc_result == DC_OK)
		result = MODE_OK;
	else
		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
			      mode->hdisplay,
			      mode->vdisplay,
			      mode->clock,
			      dc_result);

	dc_stream_release(stream);

fail:
	/* TODO: error handling */
	return result;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, bigger display is hotplugged in FB console mode, the
	 * bigger resolution modes will be filtered out by
	 * drm_mode_validate_size(), and those modes are missing after the
	 * user starts lightdm. So we need to renew the modes list
2869 * in get_modes call back, not just return the modes count 2870 */ 2871 .get_modes = get_modes, 2872 .mode_valid = amdgpu_dm_connector_mode_valid, 2873 .best_encoder = best_encoder 2874 }; 2875 2876 static void dm_crtc_helper_disable(struct drm_crtc *crtc) 2877 { 2878 } 2879 2880 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, 2881 struct drm_crtc_state *state) 2882 { 2883 struct amdgpu_device *adev = crtc->dev->dev_private; 2884 struct dc *dc = adev->dm.dc; 2885 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); 2886 int ret = -EINVAL; 2887 2888 if (unlikely(!dm_crtc_state->stream && 2889 modeset_required(state, NULL, dm_crtc_state->stream))) { 2890 WARN_ON(1); 2891 return ret; 2892 } 2893 2894 /* In some use cases, like reset, no stream is attached */ 2895 if (!dm_crtc_state->stream) 2896 return 0; 2897 2898 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) 2899 return 0; 2900 2901 return ret; 2902 } 2903 2904 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, 2905 const struct drm_display_mode *mode, 2906 struct drm_display_mode *adjusted_mode) 2907 { 2908 return true; 2909 } 2910 2911 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { 2912 .disable = dm_crtc_helper_disable, 2913 .atomic_check = dm_crtc_helper_atomic_check, 2914 .mode_fixup = dm_crtc_helper_mode_fixup 2915 }; 2916 2917 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 2918 { 2919 2920 } 2921 2922 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 2923 struct drm_crtc_state *crtc_state, 2924 struct drm_connector_state *conn_state) 2925 { 2926 return 0; 2927 } 2928 2929 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 2930 .disable = dm_encoder_helper_disable, 2931 .atomic_check = dm_encoder_helper_atomic_check 2932 }; 2933 2934 static void dm_drm_plane_reset(struct drm_plane *plane) 2935 { 2936 struct dm_plane_state *amdgpu_state = NULL; 2937 2938 if (plane->state) 2939 plane->funcs->atomic_destroy_state(plane, plane->state); 2940 2941 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 2942 WARN_ON(amdgpu_state == NULL); 2943 2944 if (amdgpu_state) { 2945 plane->state = &amdgpu_state->base; 2946 plane->state->plane = plane; 2947 plane->state->rotation = DRM_MODE_ROTATE_0; 2948 } 2949 } 2950 2951 static struct drm_plane_state * 2952 dm_drm_plane_duplicate_state(struct drm_plane *plane) 2953 { 2954 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 2955 2956 old_dm_plane_state = to_dm_plane_state(plane->state); 2957 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 2958 if (!dm_plane_state) 2959 return NULL; 2960 2961 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 2962 2963 if (old_dm_plane_state->dc_state) { 2964 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 2965 dc_plane_state_retain(dm_plane_state->dc_state); 2966 } 2967 2968 return &dm_plane_state->base; 2969 } 2970 2971 void dm_drm_plane_destroy_state(struct drm_plane *plane, 2972 struct drm_plane_state *state) 2973 { 2974 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 2975 2976 if (dm_plane_state->dc_state) 2977 dc_plane_state_release(dm_plane_state->dc_state); 2978 2979 drm_atomic_helper_plane_destroy_state(plane, state); 2980 } 2981 2982 static const struct drm_plane_funcs dm_plane_funcs = { 2983 .update_plane = drm_atomic_helper_update_plane, 2984 .disable_plane = drm_atomic_helper_disable_plane, 2985 .destroy = drm_plane_cleanup, 2986 .reset = 
dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};

static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint64_t chroma_addr = 0;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	unsigned int awidth;
	uint32_t domain;
	int r;

	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = afb->obj;
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_framebuffer_domains(adev);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain, &afb->address);

	amdgpu_bo_unreserve(rbo);

	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		return r;
	}

	amdgpu_bo_ref(rbo);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;

		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
		} else {
			awidth = ALIGN(new_state->fb->width, 64);
			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
			plane_state->address.video_progressive.luma_addr.low_part
					= lower_32_bits(afb->address);
			plane_state->address.video_progressive.luma_addr.high_part
					= upper_32_bits(afb->address);
			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
			plane_state->address.video_progressive.chroma_addr.low_part
					= lower_32_bits(chroma_addr);
			plane_state->address.video_progressive.chroma_addr.high_part
					= upper_32_bits(chroma_addr);
		}
	}

	/* This is a hack for S3: the 4.9 kernel filters the cursor buffer out
	 * of prepare and cleanup in drm_atomic_helper_prepare_planes and
	 * drm_atomic_helper_cleanup_planes because the fb is not set during S3.
	 * In a 4.10 kernel this code should be removed, and amdgpu_device_suspend
	 * code touching frame buffers should be avoided for DC.
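	 * As a stop-gap the cursor BO is also remembered in acrtc->cursor_bo
	 * below, apparently so later cursor and S3 paths can still reach it.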
3065 */ 3066 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 3067 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc); 3068 3069 acrtc->cursor_bo = obj; 3070 } 3071 return 0; 3072 } 3073 3074 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, 3075 struct drm_plane_state *old_state) 3076 { 3077 struct amdgpu_bo *rbo; 3078 struct amdgpu_framebuffer *afb; 3079 int r; 3080 3081 if (!old_state->fb) 3082 return; 3083 3084 afb = to_amdgpu_framebuffer(old_state->fb); 3085 rbo = gem_to_amdgpu_bo(afb->obj); 3086 r = amdgpu_bo_reserve(rbo, false); 3087 if (unlikely(r)) { 3088 DRM_ERROR("failed to reserve rbo before unpin\n"); 3089 return; 3090 } 3091 3092 amdgpu_bo_unpin(rbo); 3093 amdgpu_bo_unreserve(rbo); 3094 amdgpu_bo_unref(&rbo); 3095 } 3096 3097 static int dm_plane_atomic_check(struct drm_plane *plane, 3098 struct drm_plane_state *state) 3099 { 3100 struct amdgpu_device *adev = plane->dev->dev_private; 3101 struct dc *dc = adev->dm.dc; 3102 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 3103 3104 if (!dm_plane_state->dc_state) 3105 return 0; 3106 3107 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 3108 return 0; 3109 3110 return -EINVAL; 3111 } 3112 3113 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 3114 .prepare_fb = dm_plane_helper_prepare_fb, 3115 .cleanup_fb = dm_plane_helper_cleanup_fb, 3116 .atomic_check = dm_plane_atomic_check, 3117 }; 3118 3119 /* 3120 * TODO: these are currently initialized to rgb formats only. 3121 * For future use cases we should either initialize them dynamically based on 3122 * plane capabilities, or initialize this array to all formats, so internal drm 3123 * check will succeed, and let DC to implement proper check 3124 */ 3125 static const uint32_t rgb_formats[] = { 3126 DRM_FORMAT_RGB888, 3127 DRM_FORMAT_XRGB8888, 3128 DRM_FORMAT_ARGB8888, 3129 DRM_FORMAT_RGBA8888, 3130 DRM_FORMAT_XRGB2101010, 3131 DRM_FORMAT_XBGR2101010, 3132 DRM_FORMAT_ARGB2101010, 3133 DRM_FORMAT_ABGR2101010, 3134 }; 3135 3136 static const uint32_t yuv_formats[] = { 3137 DRM_FORMAT_NV12, 3138 DRM_FORMAT_NV21, 3139 }; 3140 3141 static const u32 cursor_formats[] = { 3142 DRM_FORMAT_ARGB8888 3143 }; 3144 3145 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 3146 struct amdgpu_plane *aplane, 3147 unsigned long possible_crtcs) 3148 { 3149 int res = -EPERM; 3150 3151 switch (aplane->base.type) { 3152 case DRM_PLANE_TYPE_PRIMARY: 3153 aplane->base.format_default = true; 3154 3155 res = drm_universal_plane_init( 3156 dm->adev->ddev, 3157 &aplane->base, 3158 possible_crtcs, 3159 &dm_plane_funcs, 3160 rgb_formats, 3161 ARRAY_SIZE(rgb_formats), 3162 NULL, aplane->base.type, NULL); 3163 break; 3164 case DRM_PLANE_TYPE_OVERLAY: 3165 res = drm_universal_plane_init( 3166 dm->adev->ddev, 3167 &aplane->base, 3168 possible_crtcs, 3169 &dm_plane_funcs, 3170 yuv_formats, 3171 ARRAY_SIZE(yuv_formats), 3172 NULL, aplane->base.type, NULL); 3173 break; 3174 case DRM_PLANE_TYPE_CURSOR: 3175 res = drm_universal_plane_init( 3176 dm->adev->ddev, 3177 &aplane->base, 3178 possible_crtcs, 3179 &dm_plane_funcs, 3180 cursor_formats, 3181 ARRAY_SIZE(cursor_formats), 3182 NULL, aplane->base.type, NULL); 3183 break; 3184 } 3185 3186 drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs); 3187 3188 /* Create (reset) the plane state */ 3189 if (aplane->base.funcs->reset) 3190 aplane->base.funcs->reset(&aplane->base); 3191 3192 3193 return res; 3194 } 3195 3196 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 3197 
struct drm_plane *plane, 3198 uint32_t crtc_index) 3199 { 3200 struct amdgpu_crtc *acrtc = NULL; 3201 struct amdgpu_plane *cursor_plane; 3202 3203 int res = -ENOMEM; 3204 3205 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); 3206 if (!cursor_plane) 3207 goto fail; 3208 3209 cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR; 3210 res = amdgpu_dm_plane_init(dm, cursor_plane, 0); 3211 3212 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); 3213 if (!acrtc) 3214 goto fail; 3215 3216 res = drm_crtc_init_with_planes( 3217 dm->ddev, 3218 &acrtc->base, 3219 plane, 3220 &cursor_plane->base, 3221 &amdgpu_dm_crtc_funcs, NULL); 3222 3223 if (res) 3224 goto fail; 3225 3226 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); 3227 3228 /* Create (reset) the plane state */ 3229 if (acrtc->base.funcs->reset) 3230 acrtc->base.funcs->reset(&acrtc->base); 3231 3232 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; 3233 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; 3234 3235 acrtc->crtc_id = crtc_index; 3236 acrtc->base.enabled = false; 3237 3238 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 3239 drm_mode_crtc_set_gamma_size(&acrtc->base, 256); 3240 3241 return 0; 3242 3243 fail: 3244 kfree(acrtc); 3245 kfree(cursor_plane); 3246 return res; 3247 } 3248 3249 3250 static int to_drm_connector_type(enum signal_type st) 3251 { 3252 switch (st) { 3253 case SIGNAL_TYPE_HDMI_TYPE_A: 3254 return DRM_MODE_CONNECTOR_HDMIA; 3255 case SIGNAL_TYPE_EDP: 3256 return DRM_MODE_CONNECTOR_eDP; 3257 case SIGNAL_TYPE_RGB: 3258 return DRM_MODE_CONNECTOR_VGA; 3259 case SIGNAL_TYPE_DISPLAY_PORT: 3260 case SIGNAL_TYPE_DISPLAY_PORT_MST: 3261 return DRM_MODE_CONNECTOR_DisplayPort; 3262 case SIGNAL_TYPE_DVI_DUAL_LINK: 3263 case SIGNAL_TYPE_DVI_SINGLE_LINK: 3264 return DRM_MODE_CONNECTOR_DVID; 3265 case SIGNAL_TYPE_VIRTUAL: 3266 return DRM_MODE_CONNECTOR_VIRTUAL; 3267 3268 default: 3269 return DRM_MODE_CONNECTOR_Unknown; 3270 } 3271 } 3272 3273 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 3274 { 3275 const struct drm_connector_helper_funcs *helper = 3276 connector->helper_private; 3277 struct drm_encoder *encoder; 3278 struct amdgpu_encoder *amdgpu_encoder; 3279 3280 encoder = helper->best_encoder(connector); 3281 3282 if (encoder == NULL) 3283 return; 3284 3285 amdgpu_encoder = to_amdgpu_encoder(encoder); 3286 3287 amdgpu_encoder->native_mode.clock = 0; 3288 3289 if (!list_empty(&connector->probed_modes)) { 3290 struct drm_display_mode *preferred_mode = NULL; 3291 3292 list_for_each_entry(preferred_mode, 3293 &connector->probed_modes, 3294 head) { 3295 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 3296 amdgpu_encoder->native_mode = *preferred_mode; 3297 3298 break; 3299 } 3300 3301 } 3302 } 3303 3304 static struct drm_display_mode * 3305 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 3306 char *name, 3307 int hdisplay, int vdisplay) 3308 { 3309 struct drm_device *dev = encoder->dev; 3310 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3311 struct drm_display_mode *mode = NULL; 3312 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 3313 3314 mode = drm_mode_duplicate(dev, native_mode); 3315 3316 if (mode == NULL) 3317 return NULL; 3318 3319 mode->hdisplay = hdisplay; 3320 mode->vdisplay = vdisplay; 3321 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 3322 strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 3323 3324 return mode; 3325 3326 } 3327 3328 static void amdgpu_dm_connector_add_common_modes(struct 
drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		/* skip common modes larger than, or equal to, the native mode */
		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_connector_helper_funcs *helper =
			connector->helper_private;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = helper->best_encoder(connector);

	amdgpu_dm_connector_ddc_get_modes(connector, edid);
	amdgpu_dm_connector_add_common_modes(encoder, connector);
	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = dm->ddev->dev_private;

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */

	mutex_init(&aconnector->hpd_lock);

	/* Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot
plug not supported 3441 */ 3442 switch (connector_type) { 3443 case DRM_MODE_CONNECTOR_HDMIA: 3444 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 3445 break; 3446 case DRM_MODE_CONNECTOR_DisplayPort: 3447 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 3448 break; 3449 case DRM_MODE_CONNECTOR_DVID: 3450 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 3451 break; 3452 default: 3453 break; 3454 } 3455 3456 drm_object_attach_property(&aconnector->base.base, 3457 dm->ddev->mode_config.scaling_mode_property, 3458 DRM_MODE_SCALE_NONE); 3459 3460 drm_object_attach_property(&aconnector->base.base, 3461 adev->mode_info.underscan_property, 3462 UNDERSCAN_OFF); 3463 drm_object_attach_property(&aconnector->base.base, 3464 adev->mode_info.underscan_hborder_property, 3465 0); 3466 drm_object_attach_property(&aconnector->base.base, 3467 adev->mode_info.underscan_vborder_property, 3468 0); 3469 3470 } 3471 3472 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 3473 struct i2c_msg *msgs, int num) 3474 { 3475 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 3476 struct ddc_service *ddc_service = i2c->ddc_service; 3477 struct i2c_command cmd; 3478 int i; 3479 int result = -EIO; 3480 3481 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 3482 3483 if (!cmd.payloads) 3484 return result; 3485 3486 cmd.number_of_payloads = num; 3487 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 3488 cmd.speed = 100; 3489 3490 for (i = 0; i < num; i++) { 3491 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 3492 cmd.payloads[i].address = msgs[i].addr; 3493 cmd.payloads[i].length = msgs[i].len; 3494 cmd.payloads[i].data = msgs[i].buf; 3495 } 3496 3497 if (dal_i2caux_submit_i2c_command( 3498 ddc_service->ctx->i2caux, 3499 ddc_service->ddc_pin, 3500 &cmd)) 3501 result = num; 3502 3503 kfree(cmd.payloads); 3504 return result; 3505 } 3506 3507 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 3508 { 3509 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 3510 } 3511 3512 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 3513 .master_xfer = amdgpu_dm_i2c_xfer, 3514 .functionality = amdgpu_dm_i2c_func, 3515 }; 3516 3517 static struct amdgpu_i2c_adapter * 3518 create_i2c(struct ddc_service *ddc_service, 3519 int link_index, 3520 int *res) 3521 { 3522 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 3523 struct amdgpu_i2c_adapter *i2c; 3524 3525 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 3526 if (!i2c) 3527 return NULL; 3528 i2c->base.owner = THIS_MODULE; 3529 i2c->base.class = I2C_CLASS_DDC; 3530 i2c->base.dev.parent = &adev->pdev->dev; 3531 i2c->base.algo = &amdgpu_dm_i2c_algo; 3532 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 3533 i2c_set_adapdata(&i2c->base, i2c); 3534 i2c->ddc_service = ddc_service; 3535 3536 return i2c; 3537 } 3538 3539 /* Note: this function assumes that dc_link_detect() was called for the 3540 * dc_link which will be represented by this aconnector. 
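 * (that detection fills in link->connector_signal, which
 * to_drm_connector_type() relies on below to pick the DRM connector type)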
 */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_mode_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	drm_connector_register(&aconnector->base);

	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* NOTE: this currently will create a backlight device even if no
	 * panel is connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal but we don't have sink information at this
	 * stage since detection happens afterwards. We can't do detection
	 * earlier since MST detection needs connectors to be created first.
	 */
	if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
		/* Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
3622 */ 3623 amdgpu_dm_register_backlight_device(dm); 3624 3625 if (dm->backlight_dev) 3626 dm->backlight_link = link; 3627 } 3628 #endif 3629 3630 out_free: 3631 if (res) { 3632 kfree(i2c); 3633 aconnector->i2c = NULL; 3634 } 3635 return res; 3636 } 3637 3638 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 3639 { 3640 switch (adev->mode_info.num_crtc) { 3641 case 1: 3642 return 0x1; 3643 case 2: 3644 return 0x3; 3645 case 3: 3646 return 0x7; 3647 case 4: 3648 return 0xf; 3649 case 5: 3650 return 0x1f; 3651 case 6: 3652 default: 3653 return 0x3f; 3654 } 3655 } 3656 3657 static int amdgpu_dm_encoder_init(struct drm_device *dev, 3658 struct amdgpu_encoder *aencoder, 3659 uint32_t link_index) 3660 { 3661 struct amdgpu_device *adev = dev->dev_private; 3662 3663 int res = drm_encoder_init(dev, 3664 &aencoder->base, 3665 &amdgpu_dm_encoder_funcs, 3666 DRM_MODE_ENCODER_TMDS, 3667 NULL); 3668 3669 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 3670 3671 if (!res) 3672 aencoder->encoder_id = link_index; 3673 else 3674 aencoder->encoder_id = -1; 3675 3676 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 3677 3678 return res; 3679 } 3680 3681 static void manage_dm_interrupts(struct amdgpu_device *adev, 3682 struct amdgpu_crtc *acrtc, 3683 bool enable) 3684 { 3685 /* 3686 * this is not correct translation but will work as soon as VBLANK 3687 * constant is the same as PFLIP 3688 */ 3689 int irq_type = 3690 amdgpu_crtc_idx_to_irq_type( 3691 adev, 3692 acrtc->crtc_id); 3693 3694 if (enable) { 3695 drm_crtc_vblank_on(&acrtc->base); 3696 amdgpu_irq_get( 3697 adev, 3698 &adev->pageflip_irq, 3699 irq_type); 3700 } else { 3701 3702 amdgpu_irq_put( 3703 adev, 3704 &adev->pageflip_irq, 3705 irq_type); 3706 drm_crtc_vblank_off(&acrtc->base); 3707 } 3708 } 3709 3710 static bool 3711 is_scaling_state_different(const struct dm_connector_state *dm_state, 3712 const struct dm_connector_state *old_dm_state) 3713 { 3714 if (dm_state->scaling != old_dm_state->scaling) 3715 return true; 3716 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 3717 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 3718 return true; 3719 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 3720 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 3721 return true; 3722 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 3723 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 3724 return true; 3725 return false; 3726 } 3727 3728 static void remove_stream(struct amdgpu_device *adev, 3729 struct amdgpu_crtc *acrtc, 3730 struct dc_stream_state *stream) 3731 { 3732 /* this is the update mode case */ 3733 if (adev->dm.freesync_module) 3734 mod_freesync_remove_stream(adev->dm.freesync_module, stream); 3735 3736 acrtc->otg_inst = -1; 3737 acrtc->enabled = false; 3738 } 3739 3740 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 3741 struct dc_cursor_position *position) 3742 { 3743 struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc); 3744 int x, y; 3745 int xorigin = 0, yorigin = 0; 3746 3747 if (!crtc || !plane->state->fb) { 3748 position->enable = false; 3749 position->x = 0; 3750 position->y = 0; 3751 return 0; 3752 } 3753 3754 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 3755 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 3756 DRM_ERROR("%s: bad cursor width or height %d x 
%d\n", 3757 __func__, 3758 plane->state->crtc_w, 3759 plane->state->crtc_h); 3760 return -EINVAL; 3761 } 3762 3763 x = plane->state->crtc_x; 3764 y = plane->state->crtc_y; 3765 /* avivo cursor are offset into the total surface */ 3766 x += crtc->primary->state->src_x >> 16; 3767 y += crtc->primary->state->src_y >> 16; 3768 if (x < 0) { 3769 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 3770 x = 0; 3771 } 3772 if (y < 0) { 3773 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 3774 y = 0; 3775 } 3776 position->enable = true; 3777 position->x = x; 3778 position->y = y; 3779 position->x_hotspot = xorigin; 3780 position->y_hotspot = yorigin; 3781 3782 return 0; 3783 } 3784 3785 static void handle_cursor_update(struct drm_plane *plane, 3786 struct drm_plane_state *old_plane_state) 3787 { 3788 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 3789 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 3790 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 3791 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 3792 uint64_t address = afb ? afb->address : 0; 3793 struct dc_cursor_position position; 3794 struct dc_cursor_attributes attributes; 3795 int ret; 3796 3797 if (!plane->state->fb && !old_plane_state->fb) 3798 return; 3799 3800 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n", 3801 __func__, 3802 amdgpu_crtc->crtc_id, 3803 plane->state->crtc_w, 3804 plane->state->crtc_h); 3805 3806 ret = get_cursor_position(plane, crtc, &position); 3807 if (ret) 3808 return; 3809 3810 if (!position.enable) { 3811 /* turn off cursor */ 3812 if (crtc_state && crtc_state->stream) 3813 dc_stream_set_cursor_position(crtc_state->stream, 3814 &position); 3815 return; 3816 } 3817 3818 amdgpu_crtc->cursor_width = plane->state->crtc_w; 3819 amdgpu_crtc->cursor_height = plane->state->crtc_h; 3820 3821 attributes.address.high_part = upper_32_bits(address); 3822 attributes.address.low_part = lower_32_bits(address); 3823 attributes.width = plane->state->crtc_w; 3824 attributes.height = plane->state->crtc_h; 3825 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 3826 attributes.rotation_angle = 0; 3827 attributes.attribute_flags.value = 0; 3828 3829 attributes.pitch = attributes.width; 3830 3831 if (crtc_state->stream) { 3832 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 3833 &attributes)) 3834 DRM_ERROR("DC failed to set cursor attributes\n"); 3835 3836 if (!dc_stream_set_cursor_position(crtc_state->stream, 3837 &position)) 3838 DRM_ERROR("DC failed to set cursor position\n"); 3839 } 3840 } 3841 3842 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 3843 { 3844 3845 assert_spin_locked(&acrtc->base.dev->event_lock); 3846 WARN_ON(acrtc->event); 3847 3848 acrtc->event = acrtc->base.state->event; 3849 3850 /* Set the flip status */ 3851 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 3852 3853 /* Mark this event as consumed */ 3854 acrtc->base.state->event = NULL; 3855 3856 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 3857 acrtc->crtc_id); 3858 } 3859 3860 /* 3861 * Executes flip 3862 * 3863 * Waits on all BO's fences and for proper vblank count 3864 */ 3865 static void amdgpu_dm_do_flip(struct drm_crtc *crtc, 3866 struct drm_framebuffer *fb, 3867 uint32_t target, 3868 struct dc_state *state) 3869 { 3870 unsigned long flags; 3871 uint32_t target_vblank; 3872 int r, vpos, hpos; 3873 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 3874 struct amdgpu_framebuffer *afb = 
to_amdgpu_framebuffer(fb);
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
	struct amdgpu_device *adev = crtc->dev->dev_private;
	bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
	struct dc_flip_addrs addr = { {0} };
	/* TODO: eliminate or rename surface_update */
	struct dc_surface_update surface_updates[1] = { {0} };
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);

	/* Prepare the wait for the target vblank early, before the fence waits */
	target_vblank = target - drm_crtc_vblank_count(crtc) +
			amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);

	/*
	 * TODO: this reserve can fail, so it is better not to rely on it;
	 * wait explicitly on the fences instead. In general this should
	 * also be done for blocking commits, as the framework helpers do.
	 */
	r = amdgpu_bo_reserve(abo, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve buffer before flip\n");
		WARN_ON(1);
	}

	/* Wait for all fences on this FB */
	WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
						    MAX_SCHEDULE_TIMEOUT) < 0);

	amdgpu_bo_unreserve(abo);

	/*
	 * Wait until we're out of the vertical blank period before the one
	 * targeted by the flip.
	 */
	while ((acrtc->enabled &&
		(amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
					    &vpos, &hpos, NULL, NULL,
					    &crtc->hwmode)
		 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
		(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
		(int)(target_vblank -
		 amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
		usleep_range(1000, 1100);
	}

	/* Flip */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	/* Update the CRTC fb */
	crtc->primary->fb = fb;

	WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
	WARN_ON(!acrtc_state->stream);

	addr.address.grph.addr.low_part = lower_32_bits(afb->address);
	addr.address.grph.addr.high_part = upper_32_bits(afb->address);
	addr.flip_immediate = async_flip;

	if (acrtc->base.state->event)
		prepare_flip_isr(acrtc);

	surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
	surface_updates->flip_addr = &addr;

	dc_commit_updates_for_stream(adev->dm.dc,
				     surface_updates,
				     1,
				     acrtc_state->stream,
				     NULL,
				     &surface_updates->surface,
				     state);

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
			 __func__,
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);

	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool *wait_for_vblank)
{
	uint32_t i;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dc_stream_state *dc_stream_attach;
	struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	int planes_count = 0;
	unsigned long flags;

	/*
update planes when needed */ 3976 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 3977 struct drm_crtc *crtc = new_plane_state->crtc; 3978 struct drm_crtc_state *new_crtc_state; 3979 struct drm_framebuffer *fb = new_plane_state->fb; 3980 bool pflip_needed; 3981 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 3982 3983 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 3984 handle_cursor_update(plane, old_plane_state); 3985 continue; 3986 } 3987 3988 if (!fb || !crtc || pcrtc != crtc) 3989 continue; 3990 3991 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 3992 if (!new_crtc_state->active) 3993 continue; 3994 3995 pflip_needed = !state->allow_modeset; 3996 3997 spin_lock_irqsave(&crtc->dev->event_lock, flags); 3998 if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) { 3999 DRM_ERROR("%s: acrtc %d, already busy\n", 4000 __func__, 4001 acrtc_attach->crtc_id); 4002 /* In commit tail framework this cannot happen */ 4003 WARN_ON(1); 4004 } 4005 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4006 4007 if (!pflip_needed) { 4008 WARN_ON(!dm_new_plane_state->dc_state); 4009 4010 plane_states_constructed[planes_count] = dm_new_plane_state->dc_state; 4011 4012 dc_stream_attach = acrtc_state->stream; 4013 planes_count++; 4014 4015 } else if (new_crtc_state->planes_changed) { 4016 /* Assume even ONE crtc with immediate flip means 4017 * entire can't wait for VBLANK 4018 * TODO Check if it's correct 4019 */ 4020 *wait_for_vblank = 4021 new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? 4022 false : true; 4023 4024 /* TODO: Needs rework for multiplane flip */ 4025 if (plane->type == DRM_PLANE_TYPE_PRIMARY) 4026 drm_crtc_vblank_get(crtc); 4027 4028 amdgpu_dm_do_flip( 4029 crtc, 4030 fb, 4031 drm_crtc_vblank_count(crtc) + *wait_for_vblank, 4032 dm_state->context); 4033 } 4034 4035 } 4036 4037 if (planes_count) { 4038 unsigned long flags; 4039 4040 if (new_pcrtc_state->event) { 4041 4042 drm_crtc_vblank_get(pcrtc); 4043 4044 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 4045 prepare_flip_isr(acrtc_attach); 4046 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 4047 } 4048 4049 if (false == dc_commit_planes_to_stream(dm->dc, 4050 plane_states_constructed, 4051 planes_count, 4052 dc_stream_attach, 4053 dm_state->context)) 4054 dm_error("%s: Failed to attach plane!\n", __func__); 4055 } else { 4056 /*TODO BUG Here should go disable planes on CRTC. */ 4057 } 4058 } 4059 4060 /** 4061 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 4062 * @crtc_state: the DRM CRTC state 4063 * @stream_state: the DC stream state. 4064 * 4065 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 4066 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 4067 */ 4068 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 4069 struct dc_stream_state *stream_state) 4070 { 4071 stream_state->mode_changed = crtc_state->mode_changed; 4072 } 4073 4074 static int amdgpu_dm_atomic_commit(struct drm_device *dev, 4075 struct drm_atomic_state *state, 4076 bool nonblock) 4077 { 4078 struct drm_crtc *crtc; 4079 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 4080 struct amdgpu_device *adev = dev->dev_private; 4081 int i; 4082 4083 /* 4084 * We evade vblanks and pflips on crtc that 4085 * should be changed. 
We do it here to flush and
	 * disable the interrupts before drm_atomic_helper_swap_state() is
	 * called from drm_atomic_helper_commit(); the swap updates the
	 * crtc->dm_crtc_state->stream pointer, which is used in the ISRs.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
			manage_dm_interrupts(adev, acrtc, false);
	}
	/*
	 * TODO: add a check here for SoCs that support a hardware cursor
	 * plane, to unset legacy_cursor_update.
	 */

	return drm_atomic_helper_commit(dev, state, nonblock);

	/* TODO: handle EINTR, re-enable the IRQs */
}
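
/*
 * For reference, a sketch of how the atomic entry points around here are
 * wired up. This mirrors the drm_mode_config_funcs table that amdgpu_dm
 * registers (the actual table lives elsewhere in this file); the snippet
 * below is illustrative, not a second definition:
 *
 *	static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *		.atomic_check = amdgpu_dm_atomic_check,
 *		.atomic_commit = amdgpu_dm_atomic_commit,
 *	};
 *
 * drm_atomic_helper_commit() then calls back into
 * amdgpu_dm_atomic_commit_tail() below through the mode-config helper's
 * atomic_commit_tail hook.
 */
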
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = to_dm_atomic_state(state);

	/* Update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Copy all transient state flags into dc state */
		if (dm_new_crtc_state->stream) {
			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
							    dm_new_crtc_state->stream);
		}

		/*
		 * Handle the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */

		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {

			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);

			if (!dm_new_crtc_state->stream) {
				/*
				 * This can happen when user-space
				 * notifications were not delivered in time:
				 * user space tries to set a mode on a display
				 * that is in fact already disconnected, so
				 * dc_sink is NULL on the aconnector. We
				 * expect a mode reset to follow shortly.
				 *
				 * It can also happen when an unplug occurs
				 * while the resume sequence is still running.
				 *
				 * In this case we want to pretend we still
				 * have a sink to keep the pipe running, so
				 * that the HW state stays consistent with
				 * the SW state.
				 */
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				continue;
			}

			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);

			acrtc->enabled = true;
			acrtc->hw_mode = new_crtc_state->mode;
			crtc->hwmode = new_crtc_state->mode;
		} else if (modereset_required(new_crtc_state)) {
			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);

			/* i.e. reset mode */
			if (dm_old_crtc_state->stream)
				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
		}
	} /* for_each_crtc_in_state() */

	/*
	 * Add streams after the required streams from new and replaced
	 * streams are removed from the freesync module.
	 */
	if (adev->dm.freesync_module) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
					      new_crtc_state, i) {
			struct amdgpu_dm_connector *aconnector = NULL;
			struct dm_connector_state *dm_new_con_state = NULL;
			struct amdgpu_crtc *acrtc = NULL;
			bool modeset_needed;

			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
			modeset_needed = modeset_required(
					new_crtc_state,
					dm_new_crtc_state->stream,
					dm_old_crtc_state->stream);
			/*
			 * We add a stream to freesync if:
			 * 1. said stream is not null, and
			 * 2. a modeset is requested, which means the stream
			 *    was removed previously and needs to be
			 *    replaced.
4216 */ 4217 if (dm_new_crtc_state->stream == NULL || 4218 !modeset_needed) 4219 continue; 4220 4221 acrtc = to_amdgpu_crtc(crtc); 4222 4223 aconnector = 4224 amdgpu_dm_find_first_crtc_matching_connector( 4225 state, crtc); 4226 if (!aconnector) { 4227 DRM_DEBUG_DRIVER("Atomic commit: Failed to " 4228 "find connector for acrtc " 4229 "id:%d skipping freesync " 4230 "init\n", 4231 acrtc->crtc_id); 4232 continue; 4233 } 4234 4235 mod_freesync_add_stream(adev->dm.freesync_module, 4236 dm_new_crtc_state->stream, 4237 &aconnector->caps); 4238 new_con_state = drm_atomic_get_new_connector_state( 4239 state, &aconnector->base); 4240 dm_new_con_state = to_dm_connector_state(new_con_state); 4241 4242 mod_freesync_set_user_enable(adev->dm.freesync_module, 4243 &dm_new_crtc_state->stream, 4244 1, 4245 &dm_new_con_state->user_enable); 4246 } 4247 } 4248 4249 if (dm_state->context) { 4250 dm_enable_per_frame_crtc_master_sync(dm_state->context); 4251 WARN_ON(!dc_commit_state(dm->dc, dm_state->context)); 4252 } 4253 4254 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 4255 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 4256 4257 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4258 4259 if (dm_new_crtc_state->stream != NULL) { 4260 const struct dc_stream_status *status = 4261 dc_stream_get_status(dm_new_crtc_state->stream); 4262 4263 if (!status) 4264 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 4265 else 4266 acrtc->otg_inst = status->primary_otg_inst; 4267 } 4268 } 4269 4270 /* Handle scaling and underscan changes*/ 4271 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 4272 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 4273 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 4274 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 4275 struct dc_stream_status *status = NULL; 4276 4277 if (acrtc) 4278 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 4279 4280 /* Skip any modesets/resets */ 4281 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 4282 continue; 4283 4284 /* Skip any thing not scale or underscan changes */ 4285 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) 4286 continue; 4287 4288 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4289 4290 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 4291 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); 4292 4293 if (!dm_new_crtc_state->stream) 4294 continue; 4295 4296 status = dc_stream_get_status(dm_new_crtc_state->stream); 4297 WARN_ON(!status); 4298 WARN_ON(!status->plane_count); 4299 4300 /*TODO How it works with MPO ?*/ 4301 if (!dc_commit_planes_to_stream( 4302 dm->dc, 4303 status->plane_states, 4304 status->plane_count, 4305 dm_new_crtc_state->stream, 4306 dm_state->context)) 4307 dm_error("%s: Failed to update stream scaling!\n", __func__); 4308 } 4309 4310 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 4311 new_crtc_state, i) { 4312 /* 4313 * loop to enable interrupts on newly arrived crtc 4314 */ 4315 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 4316 bool modeset_needed; 4317 4318 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4319 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 4320 modeset_needed = modeset_required( 4321 new_crtc_state, 4322 dm_new_crtc_state->stream, 4323 dm_old_crtc_state->stream); 4324 4325 if 
(dm_new_crtc_state->stream == NULL || !modeset_needed)
			continue;

		if (adev->dm.freesync_module)
			mod_freesync_notify_mode_change(
				adev->dm.freesync_module,
				&dm_new_crtc_state->stream, 1);

		manage_dm_interrupts(adev, acrtc, true);
	}

	/* Update planes when needed, per CRTC */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state->stream)
			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
	}

	/*
	 * Send a vblank event for every event not handled in flip, and mark
	 * the event consumed for drm_atomic_helper_commit_hw_done.
	 */
	spin_lock_irqsave(&adev->ddev->event_lock, flags);
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (new_crtc_state->event)
			drm_send_event_locked(dev, &new_crtc_state->event->base);

		new_crtc_state->event = NULL;
	}
	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);

	/* Signal HW programming completion */
	drm_atomic_helper_commit_hw_done(state);

	if (wait_for_vblank)
		drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);
}

static int dm_force_atomic_commit(struct drm_connector *connector)
{
	int ret = 0;
	struct drm_device *ddev = connector->dev;
	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	struct drm_plane *plane = disconnected_acrtc->base.primary;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ddev->mode_config.acquire_ctx;

	/* Construct an atomic state to restore the previous display setting */

	/* Attach connectors to drm_atomic_state */
	conn_state = drm_atomic_get_connector_state(state, connector);

	ret = PTR_ERR_OR_ZERO(conn_state);
	if (ret)
		goto err;

	/* Attach the CRTC to drm_atomic_state */
	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);

	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (ret)
		goto err;

	/* Force a restore */
	crtc_state->mode_changed = true;

	/* Attach the plane to drm_atomic_state */
	plane_state = drm_atomic_get_plane_state(state, plane);

	ret = PTR_ERR_OR_ZERO(plane_state);
	if (ret)
		goto err;

	/* Call commit internally with the state we just constructed */
	ret = drm_atomic_commit(state);
	if (!ret)
		return 0;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);

	return ret;
}
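
/*
 * Usage sketch (hypothetical caller): dm_force_atomic_commit() reuses
 * ddev->mode_config.acquire_ctx, i.e. it assumes the caller already holds
 * the modeset locks, for example via:
 *
 *	drm_modeset_lock_all(dev);
 *	ret = dm_force_atomic_commit(connector);
 *	drm_modeset_unlock_all(dev);
 *
 * drm_modeset_lock_all() populates mode_config.acquire_ctx, which is what
 * lets the drm_atomic_get_*_state() calls above take their locks.
 */
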
/*
 * This function handles all the cases when a set mode does not come upon
 * hotplug. This includes the case when the same display is unplugged and
 * then plugged back into the same port, and the case when we are running
 * without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on
	 * usermode calls to turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits,
 * and waits for the completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that when the
	 * framework releases the context, the extra locks we took here get
	 * released too.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ?
ret : 0;
}

static int dm_update_crtcs_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 bool enable,
				 bool *lock_and_validation_needed)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO: move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set.
	 */
	/* Update changed items */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = NULL;
		struct amdgpu_dm_connector *aconnector = NULL;
		struct drm_connector_state *new_con_state = NULL;
		struct dm_connector_state *dm_conn_state = NULL;

		new_stream = NULL;

		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		acrtc = to_amdgpu_crtc(crtc);

		aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

		/* TODO: this hack should go away */
		if (aconnector && enable) {
			/* Make sure a fake sink is created in the plug-in scenario */
			new_con_state = drm_atomic_get_connector_state(state,
								       &aconnector->base);

			if (IS_ERR(new_con_state)) {
				ret = PTR_ERR_OR_ZERO(new_con_state);
				break;
			}

			dm_conn_state = to_dm_connector_state(new_con_state);

			new_stream = create_stream_for_sink(aconnector,
							    &new_crtc_state->mode,
							    dm_conn_state);

			/*
			 * We can end up with no stream on ACTION_SET if a
			 * display was disconnected during S3; in this case
			 * it is not an error. The OS will be updated after
			 * detection and will do the right thing on the next
			 * atomic commit.
			 */
			if (!new_stream) {
				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
						 __func__, acrtc->base.base.id);
				break;
			}

			if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
			    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
				new_crtc_state->mode_changed = false;
				DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
						 new_crtc_state->mode_changed);
			}
		}

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto next_crtc;

		DRM_DEBUG_DRIVER(
			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
			"connectors_changed:%d\n",
			acrtc->crtc_id,
			new_crtc_state->enable,
			new_crtc_state->active,
			new_crtc_state->planes_changed,
			new_crtc_state->mode_changed,
			new_crtc_state->active_changed,
			new_crtc_state->connectors_changed);

		/* Remove the stream for any changed/disabled CRTC */
		if (!enable) {
			if (!dm_old_crtc_state->stream)
				goto next_crtc;

			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
					 crtc->base.id);
			/* i.e. reset mode */
			if (dc_remove_stream_from_ctx(
					dc,
					dm_state->context,
					dm_old_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			dc_stream_release(dm_old_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;

			*lock_and_validation_needed = true;

		} else { /* Add a stream for any updated/enabled CRTC */
			/*
			 * Quick fix to prevent a NULL pointer on new_stream
			 * when added MST connectors are not found in the
			 * existing crtc_state in chained mode.
			 * TODO: need to dig out the root cause of that.
			 */
			if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
				goto next_crtc;

			if (modereset_required(new_crtc_state))
				goto next_crtc;

			if (modeset_required(new_crtc_state, new_stream,
					     dm_old_crtc_state->stream)) {

				WARN_ON(dm_new_crtc_state->stream);

				dm_new_crtc_state->stream = new_stream;

				dc_stream_retain(new_stream);

				DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						 crtc->base.id);

				if (dc_add_stream_to_ctx(
						dc,
						dm_state->context,
						dm_new_crtc_state->stream) != DC_OK) {
					ret = -EINVAL;
					goto fail;
				}

				*lock_and_validation_needed = true;
			}
		}

next_crtc:
		/* Release the extra reference */
		if (new_stream)
			dc_stream_release(new_stream);
	}

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static int dm_update_planes_state(struct dc *dc,
				  struct drm_atomic_state *state,
				  bool enable,
				  bool *lock_and_validation_needed)
{
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	int i;
	/* TODO: return a page_flip_needed() function */
	bool pflip_needed = !state->allow_modeset;
	int ret = 0;

	if (pflip_needed)
		return ret;

	/* Add new planes */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		new_plane_crtc = new_plane_state->crtc;
		old_plane_crtc = old_plane_state->crtc;
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		dm_old_plane_state = to_dm_plane_state(old_plane_state);

		/* TODO: implement an atomic check for the cursor plane */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Remove any changed/removed planes */
		if (!enable) {
			if (!old_plane_crtc)
				continue;

			old_crtc_state = drm_atomic_get_old_crtc_state(
					state, old_plane_crtc);
			dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

			if (!dm_old_crtc_state->stream)
				continue;

			DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
					 plane->base.id, old_plane_crtc->base.id);

			if (!dc_remove_plane_from_context(
					dc,
					dm_old_crtc_state->stream,
					dm_old_plane_state->dc_state,
					dm_state->context)) {

				ret = -EINVAL;
				return ret;
			}

			dc_plane_state_release(dm_old_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;

			*lock_and_validation_needed = true;

		} else { /* Add new planes */

			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
				continue;

			if (!new_plane_crtc)
				continue;

			new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

			if (!dm_new_crtc_state->stream)
				continue;

			WARN_ON(dm_new_plane_state->dc_state);

			dm_new_plane_state->dc_state = dc_create_plane_state(dc);

			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
					 plane->base.id, new_plane_crtc->base.id);

			if (!dm_new_plane_state->dc_state) {
				ret = -EINVAL;
				return ret;
			}

			ret = fill_plane_attributes(
					new_plane_crtc->dev->dev_private,
					dm_new_plane_state->dc_state,
					new_plane_state,
					new_crtc_state);
			if (ret)
				return ret;

			if (!dc_add_plane_to_context(
					dc,
					dm_new_crtc_state->stream,
					dm_new_plane_state->dc_state,
					dm_state->context)) {

				ret = -EINVAL;
				return ret;
			}

			/*
			 * Tell DC to do a full surface update every time there
			 * is a plane change. Inefficient, but works for now.
			 */
			dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

			*lock_and_validation_needed = true;
		}
	}

	return ret;
}
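
/*
 * The atomic-check path below drives the two helpers above in a
 * remove-then-add order, so the DC context never holds a stale
 * plane/stream while its replacement is being added:
 *
 *	dm_update_planes_state(dc, state, false, ...);	// remove old planes
 *	dm_update_crtcs_state(dc, state, false, ...);	// remove old streams
 *	dm_update_crtcs_state(dc, state, true, ...);	// add new streams
 *	dm_update_planes_state(dc, state, true, ...);	// add new planes
 */
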
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct dc *dc = adev->dm.dc;
	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret, i;

	/*
	 * This bool will be set to true for any modeset/reset or plane update
	 * which implies a non-fast surface update.
	 */
	bool lock_and_validation_needed = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		goto fail;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed)
			continue;

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			goto fail;
	}

	dm_state->context = dc_create_state();
	ASSERT(dm_state->context);
	dc_resource_state_copy_construct_current(dc, dm_state->context);

	/* Remove existing planes if they are modified */
	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Disable all crtcs which require disable */
	ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Enable all crtcs which require enable */
	ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Add new/modified planes */
	ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
	if (ret)
		goto fail;

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		goto fail;

	/* Check scaling and underscan changes */
	/*
	 * TODO: scaling-changes validation was removed due to the inability
	 * to commit a new stream into the context w/o causing a full reset.
	 * Need to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * For the full-update case, when removing/adding/updating streams on
	 * one CRTC while flipping on another CRTC, acquiring the global lock
	 * guarantees that any such full-update commit will wait for the
	 * completion of any outstanding flips using DRM's
	 * synchronization events.
4886 */ 4887 4888 if (lock_and_validation_needed) { 4889 4890 ret = do_aquire_global_lock(dev, state); 4891 if (ret) 4892 goto fail; 4893 4894 if (dc_validate_global_state(dc, dm_state->context) != DC_OK) { 4895 ret = -EINVAL; 4896 goto fail; 4897 } 4898 } 4899 4900 /* Must be success */ 4901 WARN_ON(ret); 4902 return ret; 4903 4904 fail: 4905 if (ret == -EDEADLK) 4906 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); 4907 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 4908 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); 4909 else 4910 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); 4911 4912 return ret; 4913 } 4914 4915 static bool is_dp_capable_without_timing_msa(struct dc *dc, 4916 struct amdgpu_dm_connector *amdgpu_dm_connector) 4917 { 4918 uint8_t dpcd_data; 4919 bool capable = false; 4920 4921 if (amdgpu_dm_connector->dc_link && 4922 dm_helpers_dp_read_dpcd( 4923 NULL, 4924 amdgpu_dm_connector->dc_link, 4925 DP_DOWN_STREAM_PORT_COUNT, 4926 &dpcd_data, 4927 sizeof(dpcd_data))) { 4928 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; 4929 } 4930 4931 return capable; 4932 } 4933 void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, 4934 struct edid *edid) 4935 { 4936 int i; 4937 uint64_t val_capable; 4938 bool edid_check_required; 4939 struct detailed_timing *timing; 4940 struct detailed_non_pixel *data; 4941 struct detailed_data_monitor_range *range; 4942 struct amdgpu_dm_connector *amdgpu_dm_connector = 4943 to_amdgpu_dm_connector(connector); 4944 4945 struct drm_device *dev = connector->dev; 4946 struct amdgpu_device *adev = dev->dev_private; 4947 4948 edid_check_required = false; 4949 if (!amdgpu_dm_connector->dc_sink) { 4950 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); 4951 return; 4952 } 4953 if (!adev->dm.freesync_module) 4954 return; 4955 /* 4956 * if edid non zero restrict freesync only for dp and edp 4957 */ 4958 if (edid) { 4959 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT 4960 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { 4961 edid_check_required = is_dp_capable_without_timing_msa( 4962 adev->dm.dc, 4963 amdgpu_dm_connector); 4964 } 4965 } 4966 val_capable = 0; 4967 if (edid_check_required == true && (edid->version > 1 || 4968 (edid->version == 1 && edid->revision > 1))) { 4969 for (i = 0; i < 4; i++) { 4970 4971 timing = &edid->detailed_timings[i]; 4972 data = &timing->data.other_data; 4973 range = &data->data.range; 4974 /* 4975 * Check if monitor has continuous frequency mode 4976 */ 4977 if (data->type != EDID_DETAIL_MONITOR_RANGE) 4978 continue; 4979 /* 4980 * Check for flag range limits only. If flag == 1 then 4981 * no additional timing information provided. 
4982 * Default GTF, GTF Secondary curve and CVT are not 4983 * supported 4984 */ 4985 if (range->flags != 1) 4986 continue; 4987 4988 amdgpu_dm_connector->min_vfreq = range->min_vfreq; 4989 amdgpu_dm_connector->max_vfreq = range->max_vfreq; 4990 amdgpu_dm_connector->pixel_clock_mhz = 4991 range->pixel_clock_mhz * 10; 4992 break; 4993 } 4994 4995 if (amdgpu_dm_connector->max_vfreq - 4996 amdgpu_dm_connector->min_vfreq > 10) { 4997 amdgpu_dm_connector->caps.supported = true; 4998 amdgpu_dm_connector->caps.min_refresh_in_micro_hz = 4999 amdgpu_dm_connector->min_vfreq * 1000000; 5000 amdgpu_dm_connector->caps.max_refresh_in_micro_hz = 5001 amdgpu_dm_connector->max_vfreq * 1000000; 5002 val_capable = 1; 5003 } 5004 } 5005 5006 /* 5007 * TODO figure out how to notify user-mode or DRM of freesync caps 5008 * once we figure out how to deal with freesync in an upstreamable 5009 * fashion 5010 */ 5011 5012 } 5013 5014 void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector) 5015 { 5016 /* 5017 * TODO fill in once we figure out how to deal with freesync in 5018 * an upstreamable fashion 5019 */ 5020 } 5021
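
/*
 * Worked example for the range-descriptor math in
 * amdgpu_dm_add_sink_to_freesync_module(), using hypothetical EDID values:
 * a monitor range descriptor with min_vfreq = 40 Hz and max_vfreq = 75 Hz
 * passes the (max - min > 10) check, so the reported caps become
 *
 *	caps.min_refresh_in_micro_hz = 40 * 1000000 = 40000000
 *	caps.max_refresh_in_micro_hz = 75 * 1000000 = 75000000
 *
 * pixel_clock_mhz is stored as range->pixel_clock_mhz * 10 because the
 * EDID field is encoded in units of 10 MHz.
 */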