1 /* 2 * Copyright 2012-15 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
21 * 22 * Authors: AMD 23 * 24 */ 25 26 #include <drm/display/drm_dp_helper.h> 27 #include <drm/display/drm_dp_mst_helper.h> 28 #include <drm/drm_atomic.h> 29 #include <drm/drm_atomic_helper.h> 30 #include "dm_services.h" 31 #include "amdgpu.h" 32 #include "amdgpu_dm.h" 33 #include "amdgpu_dm_mst_types.h" 34 35 #include "dc.h" 36 #include "dm_helpers.h" 37 38 #include "dc_link_ddc.h" 39 #include "dc_link_dp.h" 40 #include "ddc_service_types.h" 41 #include "dpcd_defs.h" 42 43 #include "i2caux_interface.h" 44 #include "dmub_cmd.h" 45 #if defined(CONFIG_DEBUG_FS) 46 #include "amdgpu_dm_debugfs.h" 47 #endif 48 49 #include "dc/dcn20/dcn20_resource.h" 50 bool is_timing_changed(struct dc_stream_state *cur_stream, 51 struct dc_stream_state *new_stream); 52 53 54 static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, 55 struct drm_dp_aux_msg *msg) 56 { 57 ssize_t result = 0; 58 struct aux_payload payload; 59 enum aux_return_code_type operation_result; 60 struct amdgpu_device *adev; 61 struct ddc_service *ddc; 62 63 if (WARN_ON(msg->size > 16)) 64 return -E2BIG; 65 66 payload.address = msg->address; 67 payload.data = msg->buffer; 68 payload.length = msg->size; 69 payload.reply = &msg->reply; 70 payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; 71 payload.write = (msg->request & DP_AUX_I2C_READ) == 0; 72 payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; 73 payload.write_status_update = 74 (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; 75 payload.defer_delay = 0; 76 77 result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, 78 &operation_result); 79 80 /* 81 * w/a on certain intel platform where hpd is unexpected to pull low during 82 * 1st sideband message transaction by return AUX_RET_ERROR_HPD_DISCON 83 * aux transaction is succuess in such case, therefore bypass the error 84 */ 85 ddc = TO_DM_AUX(aux)->ddc_service; 86 adev = ddc->ctx->driver_context; 87 if (adev->dm.aux_hpd_discon_quirk) { 88 if (msg->address == 
DP_SIDEBAND_MSG_DOWN_REQ_BASE && 89 operation_result == AUX_RET_ERROR_HPD_DISCON) { 90 result = 0; 91 operation_result = AUX_RET_SUCCESS; 92 } 93 } 94 95 if (payload.write && result >= 0) 96 result = msg->size; 97 98 if (result < 0) 99 switch (operation_result) { 100 case AUX_RET_SUCCESS: 101 break; 102 case AUX_RET_ERROR_HPD_DISCON: 103 case AUX_RET_ERROR_UNKNOWN: 104 case AUX_RET_ERROR_INVALID_OPERATION: 105 case AUX_RET_ERROR_PROTOCOL_ERROR: 106 result = -EIO; 107 break; 108 case AUX_RET_ERROR_INVALID_REPLY: 109 case AUX_RET_ERROR_ENGINE_ACQUIRE: 110 result = -EBUSY; 111 break; 112 case AUX_RET_ERROR_TIMEOUT: 113 result = -ETIMEDOUT; 114 break; 115 } 116 117 return result; 118 } 119 120 static void 121 dm_dp_mst_connector_destroy(struct drm_connector *connector) 122 { 123 struct amdgpu_dm_connector *aconnector = 124 to_amdgpu_dm_connector(connector); 125 126 if (aconnector->dc_sink) { 127 dc_link_remove_remote_sink(aconnector->dc_link, 128 aconnector->dc_sink); 129 dc_sink_release(aconnector->dc_sink); 130 } 131 132 kfree(aconnector->edid); 133 134 drm_connector_cleanup(connector); 135 drm_dp_mst_put_port_malloc(aconnector->port); 136 kfree(aconnector); 137 } 138 139 static int 140 amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) 141 { 142 struct amdgpu_dm_connector *amdgpu_dm_connector = 143 to_amdgpu_dm_connector(connector); 144 int r; 145 146 r = drm_dp_mst_connector_late_register(connector, 147 amdgpu_dm_connector->port); 148 if (r < 0) 149 return r; 150 151 #if defined(CONFIG_DEBUG_FS) 152 connector_debugfs_init(amdgpu_dm_connector); 153 #endif 154 155 return 0; 156 } 157 158 static void 159 amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) 160 { 161 struct amdgpu_dm_connector *aconnector = 162 to_amdgpu_dm_connector(connector); 163 struct drm_dp_mst_port *port = aconnector->port; 164 struct amdgpu_dm_connector *root = aconnector->mst_port; 165 struct dc_link *dc_link = aconnector->dc_link; 166 struct dc_sink 
*dc_sink = aconnector->dc_sink; 167 168 drm_dp_mst_connector_early_unregister(connector, port); 169 170 /* 171 * Release dc_sink for connector which its attached port is 172 * no longer in the mst topology 173 */ 174 drm_modeset_lock(&root->mst_mgr.base.lock, NULL); 175 if (dc_sink) { 176 if (dc_link->sink_count) 177 dc_link_remove_remote_sink(dc_link, dc_sink); 178 179 dc_sink_release(dc_sink); 180 aconnector->dc_sink = NULL; 181 aconnector->edid = NULL; 182 } 183 184 aconnector->mst_status = MST_STATUS_DEFAULT; 185 drm_modeset_unlock(&root->mst_mgr.base.lock); 186 } 187 188 static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { 189 .fill_modes = drm_helper_probe_single_connector_modes, 190 .destroy = dm_dp_mst_connector_destroy, 191 .reset = amdgpu_dm_connector_funcs_reset, 192 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 193 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 194 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 195 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 196 .late_register = amdgpu_dm_mst_connector_late_register, 197 .early_unregister = amdgpu_dm_mst_connector_early_unregister, 198 }; 199 200 #if defined(CONFIG_DRM_AMD_DC_DCN) 201 bool needs_dsc_aux_workaround(struct dc_link *link) 202 { 203 if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && 204 (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && 205 link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) 206 return true; 207 208 return false; 209 } 210 211 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) 212 { 213 struct dc_sink *dc_sink = aconnector->dc_sink; 214 struct drm_dp_mst_port *port = aconnector->port; 215 u8 dsc_caps[16] = { 0 }; 216 u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2 217 u8 *dsc_branch_dec_caps = NULL; 218 219 aconnector->dsc_aux = 
drm_dp_mst_dsc_aux_for_port(port); 220 221 /* 222 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs 223 * because it only check the dsc/fec caps of the "port variable" and not the dock 224 * 225 * This case will return NULL: DSC capabe MST dock connected to a non fec/dsc capable display 226 * 227 * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux 228 * 229 */ 230 if (!aconnector->dsc_aux && !port->parent->port_parent && 231 needs_dsc_aux_workaround(aconnector->dc_link)) 232 aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; 233 234 if (!aconnector->dsc_aux) 235 return false; 236 237 if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0) 238 return false; 239 240 if (drm_dp_dpcd_read(aconnector->dsc_aux, 241 DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3) 242 dsc_branch_dec_caps = dsc_branch_dec_caps_raw; 243 244 if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 245 dsc_caps, dsc_branch_dec_caps, 246 &dc_sink->dsc_caps.dsc_dec_caps)) 247 return false; 248 249 return true; 250 } 251 252 static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) 253 { 254 union dp_downstream_port_present ds_port_present; 255 256 if (!aconnector->dsc_aux) 257 return false; 258 259 if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) { 260 DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n"); 261 return false; 262 } 263 264 aconnector->mst_downstream_port_present = ds_port_present; 265 DRM_INFO("Downstream port present %d, type %d\n", 266 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE); 267 268 return true; 269 } 270 #endif 271 272 static int dm_dp_mst_get_modes(struct drm_connector *connector) 273 { 274 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 275 int ret = 0; 276 277 if (!aconnector) 278 return 
drm_add_edid_modes(connector, NULL); 279 280 if (!aconnector->edid) { 281 struct edid *edid; 282 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 283 284 if (!edid) { 285 amdgpu_dm_set_mst_status(&aconnector->mst_status, 286 MST_REMOTE_EDID, false); 287 288 drm_connector_update_edid_property( 289 &aconnector->base, 290 NULL); 291 292 DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name); 293 if (!aconnector->dc_sink) { 294 struct dc_sink *dc_sink; 295 struct dc_sink_init_data init_params = { 296 .link = aconnector->dc_link, 297 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 298 299 dc_sink = dc_link_add_remote_sink( 300 aconnector->dc_link, 301 NULL, 302 0, 303 &init_params); 304 305 if (!dc_sink) { 306 DRM_ERROR("Unable to add a remote sink\n"); 307 return 0; 308 } 309 310 dc_sink->priv = aconnector; 311 aconnector->dc_sink = dc_sink; 312 } 313 314 return ret; 315 } 316 317 aconnector->edid = edid; 318 amdgpu_dm_set_mst_status(&aconnector->mst_status, 319 MST_REMOTE_EDID, true); 320 } 321 322 if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) { 323 dc_sink_release(aconnector->dc_sink); 324 aconnector->dc_sink = NULL; 325 } 326 327 if (!aconnector->dc_sink) { 328 struct dc_sink *dc_sink; 329 struct dc_sink_init_data init_params = { 330 .link = aconnector->dc_link, 331 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 332 dc_sink = dc_link_add_remote_sink( 333 aconnector->dc_link, 334 (uint8_t *)aconnector->edid, 335 (aconnector->edid->extensions + 1) * EDID_LENGTH, 336 &init_params); 337 338 if (!dc_sink) { 339 DRM_ERROR("Unable to add a remote sink\n"); 340 return 0; 341 } 342 343 dc_sink->priv = aconnector; 344 /* dc_link_add_remote_sink returns a new reference */ 345 aconnector->dc_sink = dc_sink; 346 347 if (aconnector->dc_sink) { 348 amdgpu_dm_update_freesync_caps( 349 connector, aconnector->edid); 350 351 #if defined(CONFIG_DRM_AMD_DC_DCN) 352 if 
(!validate_dsc_caps_on_connector(aconnector)) 353 memset(&aconnector->dc_sink->dsc_caps, 354 0, sizeof(aconnector->dc_sink->dsc_caps)); 355 356 if (!retrieve_downstream_port_device(aconnector)) 357 memset(&aconnector->mst_downstream_port_present, 358 0, sizeof(aconnector->mst_downstream_port_present)); 359 #endif 360 } 361 } 362 363 drm_connector_update_edid_property( 364 &aconnector->base, aconnector->edid); 365 366 ret = drm_add_edid_modes(connector, aconnector->edid); 367 368 return ret; 369 } 370 371 static struct drm_encoder * 372 dm_mst_atomic_best_encoder(struct drm_connector *connector, 373 struct drm_atomic_state *state) 374 { 375 struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, 376 connector); 377 struct drm_device *dev = connector->dev; 378 struct amdgpu_device *adev = drm_to_adev(dev); 379 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); 380 381 return &adev->dm.mst_encoders[acrtc->crtc_id].base; 382 } 383 384 static int 385 dm_dp_mst_detect(struct drm_connector *connector, 386 struct drm_modeset_acquire_ctx *ctx, bool force) 387 { 388 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 389 struct amdgpu_dm_connector *master = aconnector->mst_port; 390 struct drm_dp_mst_port *port = aconnector->port; 391 int connection_status; 392 393 if (drm_connector_is_unregistered(connector)) 394 return connector_status_disconnected; 395 396 connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, 397 aconnector->port); 398 399 if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { 400 uint8_t dpcd_rev; 401 int ret; 402 403 ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev); 404 405 if (ret == 1) { 406 port->dpcd_rev = dpcd_rev; 407 408 /* Could be DP1.2 DP Rx case*/ 409 if (!dpcd_rev) { 410 ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev); 411 412 if (ret == 1) 413 port->dpcd_rev = dpcd_rev; 414 } 415 416 if (!dpcd_rev) 417 
DRM_DEBUG_KMS("Can't decide DPCD revision number!"); 418 } 419 420 /* 421 * Could be legacy sink, logical port etc on DP1.2. 422 * Will get Nack under these cases when issue remote 423 * DPCD read. 424 */ 425 if (ret != 1) 426 DRM_DEBUG_KMS("Can't access DPCD"); 427 } else if (port->pdt == DP_PEER_DEVICE_NONE) { 428 port->dpcd_rev = 0; 429 } 430 431 /* 432 * Release dc_sink for connector which unplug event is notified by CSN msg 433 */ 434 if (connection_status == connector_status_disconnected && aconnector->dc_sink) { 435 if (aconnector->dc_link->sink_count) 436 dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); 437 438 dc_sink_release(aconnector->dc_sink); 439 aconnector->dc_sink = NULL; 440 aconnector->edid = NULL; 441 442 amdgpu_dm_set_mst_status(&aconnector->mst_status, 443 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD, 444 false); 445 } 446 447 return connection_status; 448 } 449 450 static int dm_dp_mst_atomic_check(struct drm_connector *connector, 451 struct drm_atomic_state *state) 452 { 453 struct drm_connector_state *new_conn_state = 454 drm_atomic_get_new_connector_state(state, connector); 455 struct drm_connector_state *old_conn_state = 456 drm_atomic_get_old_connector_state(state, connector); 457 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 458 struct drm_crtc_state *new_crtc_state; 459 struct drm_dp_mst_topology_mgr *mst_mgr; 460 struct drm_dp_mst_port *mst_port; 461 462 mst_port = aconnector->port; 463 mst_mgr = &aconnector->mst_port->mst_mgr; 464 465 if (!old_conn_state->crtc) 466 return 0; 467 468 if (new_conn_state->crtc) { 469 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); 470 if (!new_crtc_state || 471 !drm_atomic_crtc_needs_modeset(new_crtc_state) || 472 new_crtc_state->enable) 473 return 0; 474 } 475 476 return drm_dp_atomic_release_vcpi_slots(state, 477 mst_mgr, 478 mst_port); 479 } 480 481 static const struct drm_connector_helper_funcs 
dm_dp_mst_connector_helper_funcs = { 482 .get_modes = dm_dp_mst_get_modes, 483 .mode_valid = amdgpu_dm_connector_mode_valid, 484 .atomic_best_encoder = dm_mst_atomic_best_encoder, 485 .detect_ctx = dm_dp_mst_detect, 486 .atomic_check = dm_dp_mst_atomic_check, 487 }; 488 489 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 490 { 491 drm_encoder_cleanup(encoder); 492 kfree(encoder); 493 } 494 495 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 496 .destroy = amdgpu_dm_encoder_destroy, 497 }; 498 499 void 500 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) 501 { 502 struct drm_device *dev = adev_to_drm(adev); 503 int i; 504 505 for (i = 0; i < adev->dm.display_indexes_num; i++) { 506 struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; 507 struct drm_encoder *encoder = &amdgpu_encoder->base; 508 509 encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 510 511 drm_encoder_init( 512 dev, 513 &amdgpu_encoder->base, 514 &amdgpu_dm_encoder_funcs, 515 DRM_MODE_ENCODER_DPMST, 516 NULL); 517 518 drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs); 519 } 520 } 521 522 static struct drm_connector * 523 dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 524 struct drm_dp_mst_port *port, 525 const char *pathprop) 526 { 527 struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); 528 struct drm_device *dev = master->base.dev; 529 struct amdgpu_device *adev = drm_to_adev(dev); 530 struct amdgpu_dm_connector *aconnector; 531 struct drm_connector *connector; 532 int i; 533 534 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 535 if (!aconnector) 536 return NULL; 537 538 connector = &aconnector->base; 539 aconnector->port = port; 540 aconnector->mst_port = master; 541 amdgpu_dm_set_mst_status(&aconnector->mst_status, 542 MST_PROBE, true); 543 544 if (drm_connector_init( 545 dev, 546 connector, 547 &dm_dp_mst_connector_funcs, 548 
DRM_MODE_CONNECTOR_DisplayPort)) { 549 kfree(aconnector); 550 return NULL; 551 } 552 drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs); 553 554 amdgpu_dm_connector_init_helper( 555 &adev->dm, 556 aconnector, 557 DRM_MODE_CONNECTOR_DisplayPort, 558 master->dc_link, 559 master->connector_id); 560 561 for (i = 0; i < adev->dm.display_indexes_num; i++) { 562 drm_connector_attach_encoder(&aconnector->base, 563 &adev->dm.mst_encoders[i].base); 564 } 565 566 connector->max_bpc_property = master->base.max_bpc_property; 567 if (connector->max_bpc_property) 568 drm_connector_attach_max_bpc_property(connector, 8, 16); 569 570 connector->vrr_capable_property = master->base.vrr_capable_property; 571 if (connector->vrr_capable_property) 572 drm_connector_attach_vrr_capable_property(connector); 573 574 drm_object_attach_property( 575 &connector->base, 576 dev->mode_config.path_property, 577 0); 578 drm_object_attach_property( 579 &connector->base, 580 dev->mode_config.tile_property, 581 0); 582 583 drm_connector_set_path_property(connector, pathprop); 584 585 /* 586 * Initialize connector state before adding the connectror to drm and 587 * framebuffer lists 588 */ 589 amdgpu_dm_connector_funcs_reset(connector); 590 591 drm_dp_mst_get_port_malloc(port); 592 593 return connector; 594 } 595 596 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { 597 .add_connector = dm_dp_add_mst_connector, 598 }; 599 600 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, 601 struct amdgpu_dm_connector *aconnector, 602 int link_index) 603 { 604 struct dc_link_settings max_link_enc_cap = {0}; 605 606 aconnector->dm_dp_aux.aux.name = 607 kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", 608 link_index); 609 aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; 610 aconnector->dm_dp_aux.aux.drm_dev = dm->ddev; 611 aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; 612 613 drm_dp_aux_init(&aconnector->dm_dp_aux.aux); 614 
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, 615 &aconnector->base); 616 617 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 618 return; 619 620 dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); 621 aconnector->mst_mgr.cbs = &dm_mst_cbs; 622 drm_dp_mst_topology_mgr_init( 623 &aconnector->mst_mgr, 624 adev_to_drm(dm->adev), 625 &aconnector->dm_dp_aux.aux, 626 16, 627 4, 628 max_link_enc_cap.lane_count, 629 drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate), 630 aconnector->connector_id); 631 632 drm_connector_attach_dp_subconnector_property(&aconnector->base); 633 } 634 635 int dm_mst_get_pbn_divider(struct dc_link *link) 636 { 637 if (!link) 638 return 0; 639 640 return dc_link_bandwidth_kbps(link, 641 dc_link_get_link_cap(link)) / (8 * 1000 * 54); 642 } 643 644 #if defined(CONFIG_DRM_AMD_DC_DCN) 645 646 struct dsc_mst_fairness_params { 647 struct dc_crtc_timing *timing; 648 struct dc_sink *sink; 649 struct dc_dsc_bw_range bw_range; 650 bool compression_possible; 651 struct drm_dp_mst_port *port; 652 enum dsc_clock_force_state clock_force_enable; 653 uint32_t num_slices_h; 654 uint32_t num_slices_v; 655 uint32_t bpp_overwrite; 656 struct amdgpu_dm_connector *aconnector; 657 }; 658 659 static int kbps_to_peak_pbn(int kbps) 660 { 661 u64 peak_kbps = kbps; 662 663 peak_kbps *= 1006; 664 peak_kbps = div_u64(peak_kbps, 1000); 665 return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); 666 } 667 668 static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, 669 struct dsc_mst_fairness_vars *vars, 670 int count, 671 int k) 672 { 673 int i; 674 675 for (i = 0; i < count; i++) { 676 memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); 677 if (vars[i + k].dsc_enabled && dc_dsc_compute_config( 678 params[i].sink->ctx->dc->res_pool->dscs[0], 679 ¶ms[i].sink->dsc_caps.dsc_dec_caps, 680 params[i].sink->ctx->dc->debug.dsc_min_slice_height_override, 681 
params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, 682 0, 683 params[i].timing, 684 ¶ms[i].timing->dsc_cfg)) { 685 params[i].timing->flags.DSC = 1; 686 687 if (params[i].bpp_overwrite) 688 params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; 689 else 690 params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; 691 692 if (params[i].num_slices_h) 693 params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; 694 695 if (params[i].num_slices_v) 696 params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v; 697 } else { 698 params[i].timing->flags.DSC = 0; 699 } 700 params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn; 701 } 702 703 for (i = 0; i < count; i++) { 704 if (params[i].sink) { 705 if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 706 params[i].sink->sink_signal != SIGNAL_TYPE_NONE) 707 DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, 708 params[i].sink->edid_caps.display_name); 709 } 710 711 DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", 712 params[i].timing->flags.DSC, 713 params[i].timing->dsc_cfg.bits_per_pixel, 714 vars[i + k].pbn); 715 } 716 } 717 718 static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) 719 { 720 struct dc_dsc_config dsc_config; 721 u64 kbps; 722 723 kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); 724 dc_dsc_compute_config( 725 param.sink->ctx->dc->res_pool->dscs[0], 726 ¶m.sink->dsc_caps.dsc_dec_caps, 727 param.sink->ctx->dc->debug.dsc_min_slice_height_override, 728 param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit, 729 (int) kbps, param.timing, &dsc_config); 730 731 return dsc_config.bits_per_pixel; 732 } 733 734 static bool increase_dsc_bpp(struct drm_atomic_state *state, 735 struct dc_link *dc_link, 736 struct dsc_mst_fairness_params *params, 737 struct dsc_mst_fairness_vars *vars, 738 int count, 739 int k) 740 { 741 int i; 742 bool bpp_increased[MAX_PIPES]; 743 int initial_slack[MAX_PIPES]; 744 int min_initial_slack; 745 int next_index; 
746 int remaining_to_increase = 0; 747 int pbn_per_timeslot; 748 int link_timeslots_used; 749 int fair_pbn_alloc; 750 751 pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); 752 753 for (i = 0; i < count; i++) { 754 if (vars[i + k].dsc_enabled) { 755 initial_slack[i] = 756 kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; 757 bpp_increased[i] = false; 758 remaining_to_increase += 1; 759 } else { 760 initial_slack[i] = 0; 761 bpp_increased[i] = true; 762 } 763 } 764 765 while (remaining_to_increase) { 766 next_index = -1; 767 min_initial_slack = -1; 768 for (i = 0; i < count; i++) { 769 if (!bpp_increased[i]) { 770 if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { 771 min_initial_slack = initial_slack[i]; 772 next_index = i; 773 } 774 } 775 } 776 777 if (next_index == -1) 778 break; 779 780 link_timeslots_used = 0; 781 782 for (i = 0; i < count; i++) 783 link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot); 784 785 fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; 786 787 if (initial_slack[next_index] > fair_pbn_alloc) { 788 vars[next_index].pbn += fair_pbn_alloc; 789 if (drm_dp_atomic_find_vcpi_slots(state, 790 params[next_index].port->mgr, 791 params[next_index].port, 792 vars[next_index].pbn, 793 pbn_per_timeslot) < 0) 794 return false; 795 if (!drm_dp_mst_atomic_check(state)) { 796 vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); 797 } else { 798 vars[next_index].pbn -= fair_pbn_alloc; 799 if (drm_dp_atomic_find_vcpi_slots(state, 800 params[next_index].port->mgr, 801 params[next_index].port, 802 vars[next_index].pbn, 803 pbn_per_timeslot) < 0) 804 return false; 805 } 806 } else { 807 vars[next_index].pbn += initial_slack[next_index]; 808 if (drm_dp_atomic_find_vcpi_slots(state, 809 params[next_index].port->mgr, 810 params[next_index].port, 811 vars[next_index].pbn, 812 pbn_per_timeslot) < 0) 813 return false; 814 if 
(!drm_dp_mst_atomic_check(state)) { 815 vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; 816 } else { 817 vars[next_index].pbn -= initial_slack[next_index]; 818 if (drm_dp_atomic_find_vcpi_slots(state, 819 params[next_index].port->mgr, 820 params[next_index].port, 821 vars[next_index].pbn, 822 pbn_per_timeslot) < 0) 823 return false; 824 } 825 } 826 827 bpp_increased[next_index] = true; 828 remaining_to_increase--; 829 } 830 return true; 831 } 832 833 static bool try_disable_dsc(struct drm_atomic_state *state, 834 struct dc_link *dc_link, 835 struct dsc_mst_fairness_params *params, 836 struct dsc_mst_fairness_vars *vars, 837 int count, 838 int k) 839 { 840 int i; 841 bool tried[MAX_PIPES]; 842 int kbps_increase[MAX_PIPES]; 843 int max_kbps_increase; 844 int next_index; 845 int remaining_to_try = 0; 846 847 for (i = 0; i < count; i++) { 848 if (vars[i + k].dsc_enabled 849 && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 850 && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { 851 kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; 852 tried[i] = false; 853 remaining_to_try += 1; 854 } else { 855 kbps_increase[i] = 0; 856 tried[i] = true; 857 } 858 } 859 860 while (remaining_to_try) { 861 next_index = -1; 862 max_kbps_increase = -1; 863 for (i = 0; i < count; i++) { 864 if (!tried[i]) { 865 if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { 866 max_kbps_increase = kbps_increase[i]; 867 next_index = i; 868 } 869 } 870 } 871 872 if (next_index == -1) 873 break; 874 875 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); 876 if (drm_dp_atomic_find_vcpi_slots(state, 877 params[next_index].port->mgr, 878 params[next_index].port, 879 vars[next_index].pbn, 880 dm_mst_get_pbn_divider(dc_link)) < 0) 881 return false; 882 883 if (!drm_dp_mst_atomic_check(state)) { 884 vars[next_index].dsc_enabled = false; 885 vars[next_index].bpp_x16 = 0; 886 } else { 
887 vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); 888 if (drm_dp_atomic_find_vcpi_slots(state, 889 params[next_index].port->mgr, 890 params[next_index].port, 891 vars[next_index].pbn, 892 dm_mst_get_pbn_divider(dc_link)) < 0) 893 return false; 894 } 895 896 tried[next_index] = true; 897 remaining_to_try--; 898 } 899 return true; 900 } 901 902 static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, 903 struct dc_state *dc_state, 904 struct dc_link *dc_link, 905 struct dsc_mst_fairness_vars *vars, 906 int *link_vars_start_index) 907 { 908 int i, k; 909 struct dc_stream_state *stream; 910 struct dsc_mst_fairness_params params[MAX_PIPES]; 911 struct amdgpu_dm_connector *aconnector; 912 int count = 0; 913 bool debugfs_overwrite = false; 914 915 memset(params, 0, sizeof(params)); 916 917 /* Set up params */ 918 for (i = 0; i < dc_state->stream_count; i++) { 919 struct dc_dsc_policy dsc_policy = {0}; 920 921 stream = dc_state->streams[i]; 922 923 if (stream->link != dc_link) 924 continue; 925 926 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 927 if (!aconnector) 928 continue; 929 930 if (!aconnector->port) 931 continue; 932 933 stream->timing.flags.DSC = 0; 934 935 params[count].timing = &stream->timing; 936 params[count].sink = stream->sink; 937 params[count].aconnector = aconnector; 938 params[count].port = aconnector->port; 939 params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; 940 if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) 941 debugfs_overwrite = true; 942 params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 943 params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 944 params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; 945 params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; 946 dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); 947 
if (!dc_dsc_compute_bandwidth_range( 948 stream->sink->ctx->dc->res_pool->dscs[0], 949 stream->sink->ctx->dc->debug.dsc_min_slice_height_override, 950 dsc_policy.min_target_bpp * 16, 951 dsc_policy.max_target_bpp * 16, 952 &stream->sink->dsc_caps.dsc_dec_caps, 953 &stream->timing, ¶ms[count].bw_range)) 954 params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing); 955 956 count++; 957 } 958 959 if (count == 0) { 960 ASSERT(0); 961 return true; 962 } 963 964 /* k is start index of vars for current phy link used by mst hub */ 965 k = *link_vars_start_index; 966 /* set vars start index for next mst hub phy link */ 967 *link_vars_start_index += count; 968 969 /* Try no compression */ 970 for (i = 0; i < count; i++) { 971 vars[i + k].aconnector = params[i].aconnector; 972 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); 973 vars[i + k].dsc_enabled = false; 974 vars[i + k].bpp_x16 = 0; 975 if (drm_dp_atomic_find_vcpi_slots(state, 976 params[i].port->mgr, 977 params[i].port, 978 vars[i + k].pbn, 979 dm_mst_get_pbn_divider(dc_link)) < 0) 980 return false; 981 } 982 if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { 983 set_dsc_configs_from_fairness_vars(params, vars, count, k); 984 return true; 985 } 986 987 /* Try max compression */ 988 for (i = 0; i < count; i++) { 989 if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { 990 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); 991 vars[i + k].dsc_enabled = true; 992 vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; 993 if (drm_dp_atomic_find_vcpi_slots(state, 994 params[i].port->mgr, 995 params[i].port, 996 vars[i + k].pbn, 997 dm_mst_get_pbn_divider(dc_link)) < 0) 998 return false; 999 } else { 1000 vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); 1001 vars[i + k].dsc_enabled = false; 1002 vars[i + k].bpp_x16 = 0; 1003 if (drm_dp_atomic_find_vcpi_slots(state, 1004 
params[i].port->mgr, 1005 params[i].port, 1006 vars[i + k].pbn, 1007 dm_mst_get_pbn_divider(dc_link)) < 0) 1008 return false; 1009 } 1010 } 1011 if (drm_dp_mst_atomic_check(state)) 1012 return false; 1013 1014 /* Optimize degree of compression */ 1015 if (!increase_dsc_bpp(state, dc_link, params, vars, count, k)) 1016 return false; 1017 1018 if (!try_disable_dsc(state, dc_link, params, vars, count, k)) 1019 return false; 1020 1021 set_dsc_configs_from_fairness_vars(params, vars, count, k); 1022 1023 return true; 1024 } 1025 1026 static bool is_dsc_need_re_compute( 1027 struct drm_atomic_state *state, 1028 struct dc_state *dc_state, 1029 struct dc_link *dc_link) 1030 { 1031 int i, j; 1032 bool is_dsc_need_re_compute = false; 1033 struct amdgpu_dm_connector *stream_on_link[MAX_PIPES]; 1034 int new_stream_on_link_num = 0; 1035 struct amdgpu_dm_connector *aconnector; 1036 struct dc_stream_state *stream; 1037 const struct dc *dc = dc_link->dc; 1038 1039 /* only check phy used by dsc mst branch */ 1040 if (dc_link->type != dc_connection_mst_branch) 1041 return false; 1042 1043 if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || 1044 dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) 1045 return false; 1046 1047 for (i = 0; i < MAX_PIPES; i++) 1048 stream_on_link[i] = NULL; 1049 1050 /* check if there is mode change in new request */ 1051 for (i = 0; i < dc_state->stream_count; i++) { 1052 struct drm_crtc_state *new_crtc_state; 1053 struct drm_connector_state *new_conn_state; 1054 1055 stream = dc_state->streams[i]; 1056 if (!stream) 1057 continue; 1058 1059 /* check if stream using the same link for mst */ 1060 if (stream->link != dc_link) 1061 continue; 1062 1063 aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; 1064 if (!aconnector) 1065 continue; 1066 1067 stream_on_link[new_stream_on_link_num] = aconnector; 1068 new_stream_on_link_num++; 1069 1070 new_conn_state = 
			drm_atomic_get_new_connector_state(state, &aconnector->base);
		if (!new_conn_state)
			continue;

		if (IS_ERR(new_conn_state))
			continue;

		if (!new_conn_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state)
			continue;

		if (IS_ERR(new_crtc_state))
			continue;

		/* Any mode/active/connector change on an enabled CRTC of this
		 * link forces a DSC recompute.
		 */
		if (new_crtc_state->enable && new_crtc_state->active) {
			if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
				new_crtc_state->connectors_changed)
				return true;
		}
	}

	/* check current_state if there stream on link but it is not in
	 * new request state
	 */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		stream = dc->current_state->streams[i];
		/* only check stream on the mst hub */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		for (j = 0; j < new_stream_on_link_num; j++) {
			if (stream_on_link[j]) {
				if (aconnector == stream_on_link[j])
					break;
			}
		}

		if (j == new_stream_on_link_num) {
			/* not in new state */
			is_dsc_need_re_compute = true;
			break;
		}
	}

	return is_dsc_need_re_compute;
}

/*
 * Compute DSC configs for every MST stream in @dc_state.  All streams that
 * share a phy link are computed together under the MST manager lock, then
 * marked done so each link is only processed once.  Returns false on any
 * validation/compute failure.
 */
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state,
				       struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int link_vars_start_index = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct
				amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		/* Link already handled by an earlier stream on the same hub. */
		if (computed_streams[i])
			continue;

		/* Drop the stream from the validation context before the
		 * fairness pass; DSC-enabled streams are re-added below.
		 */
		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return false;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
						      vars, &link_vars_start_index)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		/* Every stream on this link was covered by the fairness pass. */
		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	/* Re-add DSC resources for streams that ended up with DSC enabled. */
	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
				return false;
	}

	return true;
}

/*
 * Same per-link fairness computation as compute_mst_dsc_configs_for_state(),
 * but run on a local copy of the dc_state during pre-validation; it does not
 * remove streams from the context or attach DSC resources.
 */
static bool
pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				      struct dc_state *dc_state,
				      struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int link_vars_start_index = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state,
						      dc_state,
						      stream->link,
						      vars,
						      &link_vars_start_index)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		/* Mark every stream sharing this phy link as computed. */
		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	return true;
}

/*
 * Return the index of the CRTC in @state whose new dm_crtc_state carries
 * @stream, or -1 if no CRTC in the atomic state references it.
 */
static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
					      struct dc_stream_state *stream)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_state, *old_state;

	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
		struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state);

		if (dm_state->stream == stream)
			return i;
	}
	return -1;
}

/*
 * True when @dc_link is an MST branch that advertises DSC decode or DSC
 * pass-through support in its DPCD caps.
 */
static bool is_link_to_dschub(struct dc_link *dc_link)
{
	union dpcd_dsc_basic_capabilities *dsc_caps =
			&dc_link->dpcd_caps.dsc_caps.dsc_basic_caps;

	/* only check phy used by dsc mst branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT ||
	      dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
		return false;
	return true;
}

/*
 * Scan the atomic state: DSC pre-compute is needed when at least one CRTC
 * drives a DSC-capable MST hub and every CRTC has a matching connector.
 */
static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	bool ret = false;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state);

		/* A CRTC without a matching connector disables precompute. */
		if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) {
			ret = false;
			break;
		}
		if (dm_crtc_state->stream && dm_crtc_state->stream->link)
			if (is_link_to_dschub(dm_crtc_state->stream->link))
				ret = true;
	}
	return ret;
}

/*
 * Pre-validate DSC for the atomic state: recompute DSC configs on a local
 * copy of the dc_state and, for streams whose timing is unchanged by DSC,
 * clear mode_changed on the corresponding CRTC so a full modeset is avoided.
 * Returns true on success (or when precompute is not needed at all).
 */
bool pre_validate_dsc(struct drm_atomic_state *state,
		      struct dm_atomic_state **dm_state_ptr,
		      struct dsc_mst_fairness_vars *vars)
{
	int i;
	struct dm_atomic_state *dm_state;
	struct dc_state *local_dc_state = NULL;
	int ret = 0;

	if (!is_dsc_precompute_needed(state)) {
		DRM_INFO_ONCE("DSC precompute is not needed.\n");
		return true;
	}
	if (dm_atomic_get_state(state, dm_state_ptr)) {
		DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
		return false;
	}
	dm_state = *dm_state_ptr;

	/*
	 * create local variable for dc_state. copy content of streams of dm_state->context
	 * to local variable. make sure stream pointer of local variable not the same as stream
	 * from dm_state->context.
	 */

	local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
	if (!local_dc_state)
		return false;

	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];
		int ind = find_crtc_index_in_state_by_stream(state, stream);

		if (ind >= 0) {
			struct amdgpu_dm_connector *aconnector;
			struct drm_connector_state *drm_new_conn_state;
			struct dm_connector_state *dm_new_conn_state;
			struct dm_crtc_state *dm_old_crtc_state;

			aconnector =
				amdgpu_dm_find_first_crtc_matching_connector(state,
									     state->crtcs[ind].ptr);
			drm_new_conn_state =
				drm_atomic_get_new_connector_state(state,
								   &aconnector->base);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

			/* Replace the copied stream pointer with a freshly
			 * validated stream so the local state owns its own
			 * stream objects (released in clean_exit below).
			 */
			local_dc_state->streams[i] =
				create_validate_stream_for_sink(aconnector,
								&state->crtcs[ind].new_state->mode,
								dm_new_conn_state,
								dm_old_crtc_state->stream);
			if (local_dc_state->streams[i] == NULL) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret != 0)
		goto clean_exit;

	if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
		ret = -EINVAL;
		goto clean_exit;
	}

	/*
	 * compare local_streams -> timing with dm_state->context,
	 * if the same set crtc_state->mode-change = 0;
	 */
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] &&
		    is_timing_changed(stream, local_dc_state->streams[i])) {
			DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
		} else {
			int ind = find_crtc_index_in_state_by_stream(state, stream);

			if (ind >= 0)
				state->crtcs[ind].new_state->mode_changed = 0;
		}
	}
clean_exit:
	/* Release only the streams this function created; pointers still
	 * shared with dm_state->context are left untouched.
	 */
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] != stream)
			dc_stream_release(local_dc_state->streams[i]);
	}

	kfree(local_dc_state);

	return (ret == 0);
}

/*
 * Convert a PBN value to kbps.  Inverse of the peak-bandwidth scaling used
 * for PBN allocation: kbps = pbn * (1e6 / PEAK_FACTOR_X1000) * 8 * 54 / 64
 * (assumes PBN's 54/64 MBps unit per the DP MST spec — see kbps_to_peak_pbn
 * counterpart elsewhere in this file).
 */
static unsigned int kbps_from_pbn(unsigned int pbn)
{
	unsigned int kbps = pbn;

	kbps *= (1000000 / PEAK_FACTOR_X1000);
	kbps *= 8;
	kbps *= 54;
	kbps /= 64;

	return kbps;
}

/*
 * Compute the DSC bandwidth range for @stream into @bw_range and report
 * whether a usable common config exists (both min and max target bpp
 * resolved to non-zero values).
 */
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
					  struct dc_dsc_bw_range *bw_range)
{
	struct dc_dsc_policy dsc_policy = {0};

	dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
	dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
				       stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				       dsc_policy.min_target_bpp * 16,
				       dsc_policy.max_target_bpp * 16,
				       &stream->sink->dsc_caps.dsc_dec_caps,
				       &stream->timing, bw_range);

	return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

/*
 * Validate that the MST port can carry @stream's mode: via DSC pass-through
 * bandwidth when available, otherwise against the port's full_pbn budget,
 * and finally against the branch device's overall throughput limit.
 */
enum dc_status dm_dp_mst_is_port_support_mode(
	struct amdgpu_dm_connector *aconnector,
	struct dc_stream_state *stream)
{
	int bpp, pbn, branch_max_throughput_mps = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dc_link_settings cur_link_settings;
	unsigned int end_to_end_bw_in_kbps = 0;
	unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
	unsigned int max_compressed_bw_in_kbps = 0;
	struct dc_dsc_bw_range bw_range = {0};

	/*
	 * check if the mode could be supported if DSC pass-through is supported
	 * AND check if there enough bandwidth available to support the mode
	 * with DSC enabled.
	 */
	if (is_dsc_common_config_possible(stream, &bw_range) &&
	    aconnector->port->passthrough_aux) {
		mutex_lock(&aconnector->mst_mgr.lock);

		cur_link_settings = stream->link->verified_link_cap;

		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							       &cur_link_settings
							       );
		down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn);

		/* pick the bottleneck */
		end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
					    down_link_bw_in_kbps);

		mutex_unlock(&aconnector->mst_mgr.lock);

		/*
		 * use the maximum dsc compression bandwidth as the required
		 * bandwidth for the mode
		 */
		max_compressed_bw_in_kbps = bw_range.min_kbps;

		if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) {
			DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
			return DC_FAIL_BANDWIDTH_VALIDATE;
		}
	} else {
#endif
		/* check if mode could be supported within full_pbn */
		bpp =
			convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);

		if (pbn > aconnector->port->full_pbn)
			return DC_FAIL_BANDWIDTH_VALIDATE;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	}
#endif

	/* check is mst dsc output bandwidth branch_overall_throughput_0_mps */
	switch (stream->timing.pixel_encoding) {
	case PIXEL_ENCODING_RGB:
	case PIXEL_ENCODING_YCBCR444:
		branch_max_throughput_mps =
			aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps;
		break;
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR420:
		/* Subsampled formats use the branch's second throughput cap. */
		branch_max_throughput_mps =
			aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps;
		break;
	default:
		break;
	}

	/* A zero cap means the branch did not report a limit. */
	if (branch_max_throughput_mps != 0 &&
	    ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
		return DC_FAIL_BANDWIDTH_VALIDATE;

	return DC_OK;
}