/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/dp/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"

#include "i2caux_interface.h"
#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
bool is_timing_changed(struct dc_stream_state *cur_stream,
		       struct dc_stream_state *new_stream);

#endif

static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_return_code_type operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.write_status_update =
			(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	if (payload.write && result >= 0)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_RET_SUCCESS:
			break;
		case AUX_RET_ERROR_HPD_DISCON:
		case AUX_RET_ERROR_UNKNOWN:
		case AUX_RET_ERROR_INVALID_OPERATION:
		case AUX_RET_ERROR_PROTOCOL_ERROR:
			result = -EIO;
			break;
		case AUX_RET_ERROR_INVALID_REPLY:
		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_RET_ERROR_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
bool needs_dsc_aux_workaround(struct dc_link *link)
{
	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
		return true;

	return false;
}

static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };
	u8 dsc_branch_dec_caps_raw[3] = { 0 };	// DSC branch decoder caps 0xA0 ~ 0xA2
	u8 *dsc_branch_dec_caps = NULL;

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the dsc/fec caps of the "port variable" and not the dock
	 *
	 * This case will return NULL: DSC capable MST dock connected to a non fec/dsc capable display
	 *
	 * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
	 *
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent &&
	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux,
			DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, dsc_branch_dec_caps,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}

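/*
 * Read DP_DOWNSTREAMPORT_PRESENT (DPCD 0x05) from the branch device through
 * the connector's DSC aux and cache it in mst_downstream_port_present, so
 * later code knows whether a downstream facing port is present and its type.
 */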
static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
	union dp_downstream_port_present ds_port_present;

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
		DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
		return false;
	}

	aconnector->mst_downstream_port_present = ds_port_present;
	DRM_INFO("Downstream port present %d, type %d\n",
		 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);

	return true;
}
#endif

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);

			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
			if (!aconnector->dc_sink) {
				struct dc_sink *dc_sink;
				struct dc_sink_init_data init_params = {
					.link = aconnector->dc_link,
					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

				dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					NULL,
					0,
					&init_params);

				if (!dc_sink) {
					DRM_ERROR("Unable to add a remote sink\n");
					return 0;
				}

				dc_sink->priv = aconnector;
				aconnector->dc_sink = dc_sink;
			}

			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));

			if (!retrieve_downstream_port_device(aconnector))
				memset(&aconnector->mst_downstream_port_present,
				       0, sizeof(aconnector->mst_downstream_port_present));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
											 connector);
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

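	/* MST streams use the per-CRTC fake encoders created in dm_dp_create_fake_mst_encoders(). */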
	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	if (drm_connector_is_unregistered(connector))
		return connector_status_disconnected;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}

static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
		struct drm_encoder *encoder = &amdgpu_encoder->base;

		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
	}
}

static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	struct dc_link_settings max_link_enc_cap = {0};

	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		adev_to_drm(dm->adev),
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		max_link_enc_cap.lane_count,
		drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate),
		aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
}

/*
 * PBN carried by one of the 64 MTP time slots on @link: the total link
 * bandwidth in kbps divided by 54 * 8 * 1000 (one PBN unit is 54/64 MBps).
 */
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
	enum dsc_clock_force_state clock_force_enable;
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bpp_overwrite;
	struct amdgpu_dm_connector *aconnector;
};

/*
 * Convert a stream rate in kbps to PBN, adding a 0.6% (1006/1000) margin.
 * One PBN unit is 54/64 MBps, i.e. 54 * 8 * 1000 / 64 kbps.
 */
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

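/*
 * Write the result of the DSC fairness computation back into each stream's
 * timing: DSC enable flag, target bits_per_pixel, slice counts and MST PBN.
 * vars[k] through vars[k + count - 1] hold the values negotiated for the
 * streams described by params[0..count-1]; debugfs overrides captured in
 * params (bpp_overwrite, num_slices_h/v) take precedence.
 */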
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count,
					       int k)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
		params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn;
	}

	for (i = 0; i < count; i++) {
		if (params[i].sink) {
			if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
			    params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
				DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
						 params[i].sink->edid_caps.display_name);
		}

		DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
				 params[i].timing->flags.DSC,
				 params[i].timing->dsc_cfg.bits_per_pixel,
				 vars[i + k].pbn);
	}
}

static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}

static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count,
			     int k)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled) {
			initial_slack[i] =
				kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
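			/*
			 * Tentatively take the extra PBN, then let the DRM MST atomic
			 * check decide whether the topology still fits; on failure the
			 * else branch below restores the previous allocation.
			 */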
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}

static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count,
			    int k)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i + k].dsc_enabled
				&& vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16
				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}

static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link,
					     struct dsc_mst_fairness_vars *vars,
					     int *link_vars_start_index)
{
	int i, k;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;
	bool debugfs_overwrite = false;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		if (!aconnector->port)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		params[count].aconnector = aconnector;
		params[count].port = aconnector->port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}

	if (count == 0) {
		ASSERT(0);
		return true;
	}

	/* k is start index of vars for current phy link used by mst hub */
	k = *link_vars_start_index;
	/* set vars start index for next mst hub phy link */
	*link_vars_start_index += count;

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i + k].aconnector = params[i].aconnector;
		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i + k].dsc_enabled = false;
		vars[i + k].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i + k].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count, k);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i + k].dsc_enabled = true;
			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i + k].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i + k].dsc_enabled = false;
			vars[i + k].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i + k].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count, k);

	try_disable_dsc(state, dc_link, params, vars, count, k);

	set_dsc_configs_from_fairness_vars(params, vars, count, k);

	return true;
}

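/*
 * Decide whether the DSC configuration for the streams on this MST link has
 * to be recomputed: the link must be an MST branch with DSC (or DSC
 * passthrough) support, and either a stream in the new atomic state changes
 * mode/active/connectors, or a stream that exists in dc->current_state is no
 * longer part of the new request.
 */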
static bool is_dsc_need_re_compute(
	struct drm_atomic_state *state,
	struct dc_state *dc_state,
	struct dc_link *dc_link)
{
	int i, j;
	bool is_dsc_need_re_compute = false;
	struct amdgpu_dm_connector *stream_on_link[MAX_PIPES];
	int new_stream_on_link_num = 0;
	struct amdgpu_dm_connector *aconnector;
	struct dc_stream_state *stream;
	const struct dc *dc = dc_link->dc;

	/* only check phy used by dsc mst branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
	      dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
		return false;

	for (i = 0; i < MAX_PIPES; i++)
		stream_on_link[i] = NULL;

	/* check if there is mode change in new request */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct drm_crtc_state *new_crtc_state;
		struct drm_connector_state *new_conn_state;

		stream = dc_state->streams[i];
		if (!stream)
			continue;

		/* check if stream using the same link for mst */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context;
		if (!aconnector)
			continue;

		stream_on_link[new_stream_on_link_num] = aconnector;
		new_stream_on_link_num++;

		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
		if (!new_conn_state)
			continue;

		if (IS_ERR(new_conn_state))
			continue;

		if (!new_conn_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state)
			continue;

		if (IS_ERR(new_crtc_state))
			continue;

		if (new_crtc_state->enable && new_crtc_state->active) {
			if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
			    new_crtc_state->connectors_changed)
				return true;
		}
	}

	/* check current_state for a stream on this link that is no longer
	 * present in the new request state
	 */
	for (i = 0; i < dc->current_state->stream_count; i++) {
		stream = dc->current_state->streams[i];
		/* only check stream on the mst hub */
		if (stream->link != dc_link)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		if (!aconnector)
			continue;

		for (j = 0; j < new_stream_on_link_num; j++) {
			if (stream_on_link[j]) {
				if (aconnector == stream_on_link[j])
					break;
			}
		}

		if (j == new_stream_on_link_num) {
			/* not in new state */
			is_dsc_need_re_compute = true;
			break;
		}
	}

	return is_dsc_need_re_compute;
}

bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state,
				       struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int link_vars_start_index = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

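		/* Only streams whose remote sink decodes DSC take part in the DSC computation. */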
		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
			return false;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link,
						      vars, &link_vars_start_index)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
				return false;
	}

	return true;
}

static bool
pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				      struct dc_state *dc_state,
				      struct dsc_mst_fairness_vars *vars)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int link_vars_start_index = 0;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state,
						      dc_state,
						      stream->link,
						      vars,
						      &link_vars_start_index)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	return true;
}

static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state,
					      struct dc_stream_state *stream)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_state, *old_state;

	for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) {
		struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state);

		if (dm_state->stream == stream)
			return i;
	}
	return -1;
}

static bool is_link_to_dschub(struct dc_link *dc_link)
{
	union dpcd_dsc_basic_capabilities *dsc_caps =
			&dc_link->dpcd_caps.dsc_caps.dsc_basic_caps;

	/* only check phy used by dsc mst branch */
	if (dc_link->type != dc_connection_mst_branch)
		return false;

	if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT ||
	      dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
		return false;
	return true;
}

static bool is_dsc_precompute_needed(struct drm_atomic_state *state)
{
	int i;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	bool ret = false;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) {
			ret = false;
			break;
		}
		if (dm_crtc_state->stream && dm_crtc_state->stream->link)
			if (is_link_to_dschub(dm_crtc_state->stream->link))
				ret = true;
	}
	return ret;
}

void pre_validate_dsc(struct drm_atomic_state *state,
		      struct dm_atomic_state **dm_state_ptr,
		      struct dsc_mst_fairness_vars *vars)
{
	int i;
	struct dm_atomic_state *dm_state;
	struct dc_state *local_dc_state = NULL;

	if (!is_dsc_precompute_needed(state)) {
		DRM_INFO_ONCE("DSC precompute is not needed.\n");
		return;
	}
	if (dm_atomic_get_state(state, dm_state_ptr)) {
		DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
		return;
	}
	dm_state = *dm_state_ptr;

	/*
	 * Create a local copy of dc_state and copy the streams of
	 * dm_state->context into it, making sure the stream pointers in the
	 * local copy are not the same objects as the streams in
	 * dm_state->context.
	 */

	local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
	if (!local_dc_state)
		return;

	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];
		int ind = find_crtc_index_in_state_by_stream(state, stream);

		if (ind >= 0) {
			struct amdgpu_dm_connector *aconnector;
			struct drm_connector_state *drm_new_conn_state;
			struct dm_connector_state *dm_new_conn_state;
			struct dm_crtc_state *dm_old_crtc_state;

			aconnector =
				amdgpu_dm_find_first_crtc_matching_connector(state,
									     state->crtcs[ind].ptr);
			drm_new_conn_state =
				drm_atomic_get_new_connector_state(state,
								   &aconnector->base);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

			local_dc_state->streams[i] =
				create_validate_stream_for_sink(aconnector,
								&state->crtcs[ind].new_state->mode,
								dm_new_conn_state,
								dm_old_crtc_state->stream);
		}
	}

	if (!pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars)) {
		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
		goto clean_exit;
	}

	/*
	 * Compare the local stream timings with dm_state->context; if a
	 * timing is unchanged, clear mode_changed on the corresponding CRTC
	 * state.
	 */
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] &&
		    is_timing_changed(stream, local_dc_state->streams[i])) {
			DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
		} else {
			int ind = find_crtc_index_in_state_by_stream(state, stream);

			if (ind >= 0)
				state->crtcs[ind].new_state->mode_changed = 0;
		}
	}
clean_exit:
	for (i = 0; i < local_dc_state->stream_count; i++) {
		struct dc_stream_state *stream = dm_state->context->streams[i];

		if (local_dc_state->streams[i] != stream)
			dc_stream_release(local_dc_state->streams[i]);
	}

	kfree(local_dc_state);
}
#endif