/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
"modules/inc/mod_freesync.h" 108 #include "modules/power/power_helpers.h" 109 110 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 111 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 112 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 113 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 114 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 115 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 116 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 117 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 118 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 119 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 120 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 121 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 122 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 123 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 124 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 125 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 126 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" 127 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); 128 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 129 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 130 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 131 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 132 133 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 134 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 135 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 136 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 137 138 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 139 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 140 141 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 142 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 143 144 /* Number of bytes in PSP header for firmware. */ 145 #define PSP_HEADER_BYTES 0x100 146 147 /* Number of bytes in PSP footer for firmware. */ 148 #define PSP_FOOTER_BYTES 0x100 149 150 /** 151 * DOC: overview 152 * 153 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 154 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 155 * requests into DC requests, and DC responses into DRM responses. 156 * 157 * The root control structure is &struct amdgpu_display_manager. 
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc = NULL;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
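
/*
 * Reading aid for the packing above (a sketch, not a new interface): the
 * two words mirror the legacy register layout, with each 32-bit value
 * carrying two 16-bit fields. A caller can unpack them as:
 *
 *	vpos      = position & 0xffff;
 *	hpos      = position >> 16;
 *	vbl_start = vbl & 0xffff;
 *	vbl_end   = vbl >> 16;
 */
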
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
					int planes_count)
{
	int i, j;

	for (i = 0, j = planes_count - 1; i < j; i++, j--)
		swap(array_of_surface_update[i], array_of_surface_update[j]);
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for the dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 * Return: the value returned by dc_update_planes_and_stream().
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	reverse_planes_order(array_of_surface_update, planes_count);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}
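
/*
 * A minimal call-site sketch (identifiers here are illustrative, not
 * taken from this file): a commit path builds one dc_surface_update per
 * plane plus a single dc_stream_update for the CRTC, then hands them off
 * in one shot:
 *
 *	update_planes_and_stream_adapter(dm->dc, update_type, planes_count,
 *					 dm_new_crtc_state->stream,
 *					 &stream_update,
 *					 surface_updates);
 */
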
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling.
	 * We use the cooked hw counter, as that one incremented at start of
	 * this vblank of pageflip completion, so last_flip_vblank is the
	 * forbidden count for queueing new pageflips if vsync + VRR is
	 * enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int) !e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
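
/*
 * Worked example for the refresh-rate trace above: a frame that lasted
 * frame_duration_ns = 10000000 ns (10 ms) is logged as
 * ktime_divns(NSEC_PER_SEC, 10000000) = 100, i.e. a 100 Hz effective
 * refresh rate for that frame.
 */
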
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index
 * through the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index > adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else
				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
						notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if there is existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
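
/*
 * Usage sketch (mirroring how the HPD callbacks above are wired up in
 * this driver; the 'true' offload flag pushes processing onto
 * adev->dm.delayed_hpd_wq instead of running it in the outbox IRQ path):
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 */
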
static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
						dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);

}
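
/*
 * Flow note for the offloaded path (a summary of the code below, not a
 * new mechanism): dm_dmub_outbox1_low_irq() copies each notification
 * with kmemdup() and queues dm_handle_hpd_work() on
 * adev->dm.delayed_hpd_wq, so the registered callback runs in process
 * context rather than in the interrupt handler itself.
 */
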
#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt event.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count,
					 entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}
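
/*
 * Note on the component glue above (a summary, not new behaviour): the
 * ops follow the struct drm_audio_component contract from
 * <drm/drm_audio_component.h>. The HDA driver binds against us through
 * the component framework, pulls ELDs via .get_eld(), and is notified
 * about hotplug through pin_eld_notify(), keyed by the audio pin
 * instance.
 */
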
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot == agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (add 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that it is unable to use the vram which
			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
			 * workaround that increases the system aperture high address
			 * (add 1) to get rid of the VM fault and hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;

}
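
/*
 * Reading aid for the shift pairs above (no behaviour change implied):
 * the system aperture bounds are carried in 256KB units (>> 18 / << 18),
 * the AGP aperture in 16MB units (>> 24 / << 24), and the GART page
 * table addresses in GPU pages (>> AMDGPU_GPU_PAGE_SHIFT, restored with
 * << 12). For example, an fb_start of 0x8000000000 (2^39) becomes
 * logical_addr_low = 0x200000 and is restored as 0x200000 << 18.
 */
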
static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
		goto skip;
	}

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* offload_work->data comes from handle_hpd_rx_irq() ->
		 * schedule_hpd_rx_offload_work(); this is the deferred
		 * handling for an hpd short pulse. By this point the link
		 * status may have changed, so read the latest link status
		 * from the DPCD registers. If the link status is good, skip
		 * running link training again.
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* Before dc_link_dp_handle_link_loss(), allow a new link-loss
		 * handling request to be added to the work queue if the link
		 * is lost again at the end of dc_link_dp_handle_link_loss().
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
			dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);

}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;


	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
		    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}
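
/*
 * Design note (summarizing the allocation above): one single-threaded
 * workqueue is created per DC link, so HPD RX offload work for a given
 * link is serialized, while work for different links can proceed
 * independently of each other.
 */
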
struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
		},
	},
	{}
	/* TODO: refactor this from a fixed table to a dynamic option */
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}
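
/*
 * Until the TODO above is addressed, new systems are quirked by
 * appending one more match block before the terminating {} entry of
 * hpd_disconnect_quirk_table; a hypothetical example (product name made
 * up for illustration):
 *
 *	{
 *		.matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 9999"),
 *		},
 *	},
 */
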
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	struct dc_callback_init init_params;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dpia_aux_lock);
	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}
	if (init_data.flags.gpu_vm_support &&
	    (amdgpu_sg_display == 0))
		init_data.flags.gpu_vm_support = false;

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
	adev->dm.dc->debug.ignore_cable_id = true;
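
	/*
	 * The amdgpu_dc_debug_mask tests above are driven by the
	 * amdgpu.dcdebugmask module parameter; booting with the
	 * DC_DISABLE_STUTTER bit set there, for instance, lands in the
	 * disable_stutter branch above (the bit assignments live in
	 * enum DC_DEBUG_MASK in amd_shared.h).
	 */
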
1743 */ 1744 adev->dm.dc->debug.temp_mst_deallocation_sequence = true; 1745 1746 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 1747 DRM_INFO("DP-HDMI FRL PCON supported\n"); 1748 1749 r = dm_dmub_hw_init(adev); 1750 if (r) { 1751 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1752 goto error; 1753 } 1754 1755 dc_hardware_init(adev->dm.dc); 1756 1757 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 1758 if (!adev->dm.hpd_rx_offload_wq) { 1759 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 1760 goto error; 1761 } 1762 1763 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 1764 struct dc_phy_addr_space_config pa_config; 1765 1766 mmhub_read_system_context(adev, &pa_config); 1767 1768 // Call the DC init_memory func 1769 dc_setup_system_context(adev->dm.dc, &pa_config); 1770 } 1771 1772 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 1773 if (!adev->dm.freesync_module) { 1774 DRM_ERROR( 1775 "amdgpu: failed to initialize freesync_module.\n"); 1776 } else 1777 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 1778 adev->dm.freesync_module); 1779 1780 amdgpu_dm_init_color_mod(); 1781 1782 if (adev->dm.dc->caps.max_links > 0) { 1783 adev->dm.vblank_control_workqueue = 1784 create_singlethread_workqueue("dm_vblank_control_workqueue"); 1785 if (!adev->dm.vblank_control_workqueue) 1786 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 1787 } 1788 1789 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 1790 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 1791 1792 if (!adev->dm.hdcp_workqueue) 1793 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 1794 else 1795 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 1796 1797 dc_init_callbacks(adev->dm.dc, &init_params); 1798 } 1799 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 1800 init_completion(&adev->dm.dmub_aux_transfer_done); 1801 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 1802 if (!adev->dm.dmub_notify) { 1803 DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify\n"); 1804 goto error; 1805 } 1806 1807 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 1808 if (!adev->dm.delayed_hpd_wq) { 1809 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 1810 goto error; 1811 } 1812 1813 amdgpu_dm_outbox_init(adev); 1814 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 1815 dmub_aux_setconfig_callback, false)) { 1816 DRM_ERROR("amdgpu: failed to register dmub aux callback\n"); 1817 goto error; 1818 } 1819 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 1820 * It is expected that DMUB will resend any pending notifications at this point. Note 1821 * that hpd and hpd_irq handler registration is deferred to register_hpd_handlers() to 1822 * align with the legacy interface initialization sequence. Connection status will be 1823 * proactively detected once in amdgpu_dm_initialize_drm_device. 
1824 */ 1825 dc_enable_dmub_outbox(adev->dm.dc); 1826 1827 /* DPIA trace goes to dmesg logs only if outbox is enabled */ 1828 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) 1829 dc_dmub_srv_enable_dpia_trace(adev->dm.dc); 1830 } 1831 1832 if (amdgpu_dm_initialize_drm_device(adev)) { 1833 DRM_ERROR( 1834 "amdgpu: failed to initialize sw for display support.\n"); 1835 goto error; 1836 } 1837 1838 /* Create fake encoders for MST */ 1839 dm_dp_create_fake_mst_encoders(adev); 1840 1841 /* TODO: Add_display_info? */ 1842 1843 /* TODO use dynamic cursor width */ 1844 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 1845 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 1846 1847 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 1848 DRM_ERROR( 1849 "amdgpu: failed to initialize drm vblank support.\n"); 1850 goto error; 1851 } 1852 1853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1854 adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); 1855 if (!adev->dm.secure_display_ctxs) 1856 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); 1857 #endif 1858 1859 DRM_DEBUG_DRIVER("KMS initialized.\n"); 1860 1861 return 0; 1862 error: 1863 amdgpu_dm_fini(adev); 1864 1865 return -EINVAL; 1866 } 1867 1868 static int amdgpu_dm_early_fini(void *handle) 1869 { 1870 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1871 1872 amdgpu_dm_audio_fini(adev); 1873 1874 return 0; 1875 } 1876 1877 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1878 { 1879 int i; 1880 1881 if (adev->dm.vblank_control_workqueue) { 1882 destroy_workqueue(adev->dm.vblank_control_workqueue); 1883 adev->dm.vblank_control_workqueue = NULL; 1884 } 1885 1886 amdgpu_dm_destroy_drm_device(&adev->dm); 1887 1888 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1889 if (adev->dm.secure_display_ctxs) { 1890 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1891 if (adev->dm.secure_display_ctxs[i].crtc) { 1892 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); 1893 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); 1894 } 1895 } 1896 kfree(adev->dm.secure_display_ctxs); 1897 adev->dm.secure_display_ctxs = NULL; 1898 } 1899 #endif 1900 if (adev->dm.hdcp_workqueue) { 1901 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 1902 adev->dm.hdcp_workqueue = NULL; 1903 } 1904 1905 if (adev->dm.dc) { 1906 dc_deinit_callbacks(adev->dm.dc); 1907 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 1908 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1909 kfree(adev->dm.dmub_notify); 1910 adev->dm.dmub_notify = NULL; 1911 destroy_workqueue(adev->dm.delayed_hpd_wq); 1912 adev->dm.delayed_hpd_wq = NULL; 1913 } 1914 } 1915 1916 if (adev->dm.dmub_bo) 1917 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 1918 &adev->dm.dmub_bo_gpu_addr, 1919 &adev->dm.dmub_bo_cpu_addr); 1920 1921 if (adev->dm.hpd_rx_offload_wq) { 1922 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 1923 if (adev->dm.hpd_rx_offload_wq[i].wq) { 1924 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 1925 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 1926 } 1927 } 1928 1929 kfree(adev->dm.hpd_rx_offload_wq); 1930 adev->dm.hpd_rx_offload_wq = NULL; 1931 } 1932 1933 /* DC Destroy TODO: Replace destroy DAL */ 1934 if (adev->dm.dc) 1935 dc_destroy(&adev->dm.dc); 1936 /* 1937 * TODO: pageflip, vblank interrupt 1938 * 1939 * amdgpu_dm_irq_fini(adev); 1940 */ 1941 1942 if (adev->dm.cgs_device) { 1943 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 
1944 adev->dm.cgs_device = NULL; 1945 } 1946 if (adev->dm.freesync_module) { 1947 mod_freesync_destroy(adev->dm.freesync_module); 1948 adev->dm.freesync_module = NULL; 1949 } 1950 1951 mutex_destroy(&adev->dm.audio_lock); 1952 mutex_destroy(&adev->dm.dc_lock); 1953 mutex_destroy(&adev->dm.dpia_aux_lock); 1954 } 1955 1956 static int load_dmcu_fw(struct amdgpu_device *adev) 1957 { 1958 const char *fw_name_dmcu = NULL; 1959 int r; 1960 const struct dmcu_firmware_header_v1_0 *hdr; 1961 1962 switch (adev->asic_type) { 1963 #if defined(CONFIG_DRM_AMD_DC_SI) 1964 case CHIP_TAHITI: 1965 case CHIP_PITCAIRN: 1966 case CHIP_VERDE: 1967 case CHIP_OLAND: 1968 #endif 1969 case CHIP_BONAIRE: 1970 case CHIP_HAWAII: 1971 case CHIP_KAVERI: 1972 case CHIP_KABINI: 1973 case CHIP_MULLINS: 1974 case CHIP_TONGA: 1975 case CHIP_FIJI: 1976 case CHIP_CARRIZO: 1977 case CHIP_STONEY: 1978 case CHIP_POLARIS11: 1979 case CHIP_POLARIS10: 1980 case CHIP_POLARIS12: 1981 case CHIP_VEGAM: 1982 case CHIP_VEGA10: 1983 case CHIP_VEGA12: 1984 case CHIP_VEGA20: 1985 return 0; 1986 case CHIP_NAVI12: 1987 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 1988 break; 1989 case CHIP_RAVEN: 1990 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 1991 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1992 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 1993 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1994 else 1995 return 0; 1996 break; 1997 default: 1998 switch (adev->ip_versions[DCE_HWIP][0]) { 1999 case IP_VERSION(2, 0, 2): 2000 case IP_VERSION(2, 0, 3): 2001 case IP_VERSION(2, 0, 0): 2002 case IP_VERSION(2, 1, 0): 2003 case IP_VERSION(3, 0, 0): 2004 case IP_VERSION(3, 0, 2): 2005 case IP_VERSION(3, 0, 3): 2006 case IP_VERSION(3, 0, 1): 2007 case IP_VERSION(3, 1, 2): 2008 case IP_VERSION(3, 1, 3): 2009 case IP_VERSION(3, 1, 4): 2010 case IP_VERSION(3, 1, 5): 2011 case IP_VERSION(3, 1, 6): 2012 case IP_VERSION(3, 2, 0): 2013 case IP_VERSION(3, 2, 1): 2014 return 0; 2015 default: 2016 break; 2017 } 2018 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2019 return -EINVAL; 2020 } 2021 2022 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2023 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 2024 return 0; 2025 } 2026 2027 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); 2028 if (r == -ENODEV) { 2029 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2030 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 2031 adev->dm.fw_dmcu = NULL; 2032 return 0; 2033 } 2034 if (r) { 2035 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 2036 fw_name_dmcu); 2037 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2038 return r; 2039 } 2040 2041 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2042 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2043 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2044 adev->firmware.fw_size += 2045 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2046 2047 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2048 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2049 adev->firmware.fw_size += 2050 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2051 2052 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2053 2054 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 2055 2056 return 0; 2057 } 2058 2059 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t 
address) 2060 { 2061 struct amdgpu_device *adev = ctx; 2062 2063 return dm_read_reg(adev->dm.dc->ctx, address); 2064 } 2065 2066 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2067 uint32_t value) 2068 { 2069 struct amdgpu_device *adev = ctx; 2070 2071 return dm_write_reg(adev->dm.dc->ctx, address, value); 2072 } 2073 2074 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2075 { 2076 struct dmub_srv_create_params create_params; 2077 struct dmub_srv_region_params region_params; 2078 struct dmub_srv_region_info region_info; 2079 struct dmub_srv_memory_params memory_params; 2080 struct dmub_srv_fb_info *fb_info; 2081 struct dmub_srv *dmub_srv; 2082 const struct dmcub_firmware_header_v1_0 *hdr; 2083 enum dmub_asic dmub_asic; 2084 enum dmub_status status; 2085 int r; 2086 2087 switch (adev->ip_versions[DCE_HWIP][0]) { 2088 case IP_VERSION(2, 1, 0): 2089 dmub_asic = DMUB_ASIC_DCN21; 2090 break; 2091 case IP_VERSION(3, 0, 0): 2092 dmub_asic = DMUB_ASIC_DCN30; 2093 break; 2094 case IP_VERSION(3, 0, 1): 2095 dmub_asic = DMUB_ASIC_DCN301; 2096 break; 2097 case IP_VERSION(3, 0, 2): 2098 dmub_asic = DMUB_ASIC_DCN302; 2099 break; 2100 case IP_VERSION(3, 0, 3): 2101 dmub_asic = DMUB_ASIC_DCN303; 2102 break; 2103 case IP_VERSION(3, 1, 2): 2104 case IP_VERSION(3, 1, 3): 2105 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2106 break; 2107 case IP_VERSION(3, 1, 4): 2108 dmub_asic = DMUB_ASIC_DCN314; 2109 break; 2110 case IP_VERSION(3, 1, 5): 2111 dmub_asic = DMUB_ASIC_DCN315; 2112 break; 2113 case IP_VERSION(3, 1, 6): 2114 dmub_asic = DMUB_ASIC_DCN316; 2115 break; 2116 case IP_VERSION(3, 2, 0): 2117 dmub_asic = DMUB_ASIC_DCN32; 2118 break; 2119 case IP_VERSION(3, 2, 1): 2120 dmub_asic = DMUB_ASIC_DCN321; 2121 break; 2122 default: 2123 /* ASIC doesn't support DMUB. */ 2124 return 0; 2125 } 2126 2127 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 2128 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 2129 2130 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2131 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 2132 AMDGPU_UCODE_ID_DMCUB; 2133 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 2134 adev->dm.dmub_fw; 2135 adev->firmware.fw_size += 2136 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 2137 2138 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 2139 adev->dm.dmcub_fw_version); 2140 } 2141 2142 2143 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 2144 dmub_srv = adev->dm.dmub_srv; 2145 2146 if (!dmub_srv) { 2147 DRM_ERROR("Failed to allocate DMUB service!\n"); 2148 return -ENOMEM; 2149 } 2150 2151 memset(&create_params, 0, sizeof(create_params)); 2152 create_params.user_ctx = adev; 2153 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2154 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2155 create_params.asic = dmub_asic; 2156 2157 /* Create the DMUB service. */ 2158 status = dmub_srv_create(dmub_srv, &create_params); 2159 if (status != DMUB_STATUS_OK) { 2160 DRM_ERROR("Error creating DMUB service: %d\n", status); 2161 return -EINVAL; 2162 } 2163 2164 /* Calculate the size of all the regions for the DMUB service. 
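 * The service, broadly, carves one allocation into regions such as the
 * firmware instruction constants, firmware BSS/data, a VBIOS copy and
 * mailbox/trace buffers; dmub_srv_calc_region_info() reports the total
 * framebuffer size these regions require.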
*/ 2165 memset(&region_params, 0, sizeof(region_params)); 2166 2167 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 2168 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 2169 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 2170 region_params.vbios_size = adev->bios_size; 2171 region_params.fw_bss_data = region_params.bss_data_size ? 2172 adev->dm.dmub_fw->data + 2173 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2174 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2175 region_params.fw_inst_const = 2176 adev->dm.dmub_fw->data + 2177 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2178 PSP_HEADER_BYTES; 2179 region_params.is_mailbox_in_inbox = false; 2180 2181 status = dmub_srv_calc_region_info(dmub_srv, &region_params, 2182 &region_info); 2183 2184 if (status != DMUB_STATUS_OK) { 2185 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 2186 return -EINVAL; 2187 } 2188 2189 /* 2190 * Allocate a framebuffer based on the total size of all the regions. 2191 * TODO: Move this into GART. 2192 */ 2193 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2194 AMDGPU_GEM_DOMAIN_VRAM | 2195 AMDGPU_GEM_DOMAIN_GTT, 2196 &adev->dm.dmub_bo, 2197 &adev->dm.dmub_bo_gpu_addr, 2198 &adev->dm.dmub_bo_cpu_addr); 2199 if (r) 2200 return r; 2201 2202 /* Rebase the regions on the framebuffer address. */ 2203 memset(&memory_params, 0, sizeof(memory_params)); 2204 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr; 2205 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr; 2206 memory_params.region_info = &region_info; 2207 2208 adev->dm.dmub_fb_info = 2209 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 2210 fb_info = adev->dm.dmub_fb_info; 2211 2212 if (!fb_info) { 2213 DRM_ERROR( 2214 "Failed to allocate framebuffer info for DMUB service!\n"); 2215 return -ENOMEM; 2216 } 2217 2218 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info); 2219 if (status != DMUB_STATUS_OK) { 2220 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 2221 return -EINVAL; 2222 } 2223 2224 return 0; 2225 } 2226 2227 static int dm_sw_init(void *handle) 2228 { 2229 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2230 int r; 2231 2232 r = dm_dmub_sw_init(adev); 2233 if (r) 2234 return r; 2235 2236 return load_dmcu_fw(adev); 2237 } 2238 2239 static int dm_sw_fini(void *handle) 2240 { 2241 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2242 2243 kfree(adev->dm.dmub_fb_info); 2244 adev->dm.dmub_fb_info = NULL; 2245 2246 if (adev->dm.dmub_srv) { 2247 dmub_srv_destroy(adev->dm.dmub_srv); 2248 kfree(adev->dm.dmub_srv); 2249 adev->dm.dmub_srv = NULL; 2250 } 2251 2252 amdgpu_ucode_release(&adev->dm.dmub_fw); 2253 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2254 2255 return 0; 2256 } 2257 2258 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 2259 { 2260 struct amdgpu_dm_connector *aconnector; 2261 struct drm_connector *connector; 2262 struct drm_connector_list_iter iter; 2263 int ret = 0; 2264 2265 drm_connector_list_iter_begin(dev, &iter); 2266 drm_for_each_connector_iter(connector, &iter) { 2267 aconnector = to_amdgpu_dm_connector(connector); 2268 if (aconnector->dc_link->type == dc_connection_mst_branch && 2269 aconnector->mst_mgr.aux) { 2270 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", 2271 aconnector, 2272 aconnector->base.base.id); 2273 2274 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 2275 if (ret < 0) { 2276 DRM_ERROR("DM_MST: Failed to start MST\n"); 2277 aconnector->dc_link->type 
= 2278 dc_connection_single; 2279 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2280 aconnector->dc_link); 2281 break; 2282 } 2283 } 2284 } 2285 drm_connector_list_iter_end(&iter); 2286 2287 return ret; 2288 } 2289 2290 static int dm_late_init(void *handle) 2291 { 2292 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2293 2294 struct dmcu_iram_parameters params; 2295 unsigned int linear_lut[16]; 2296 int i; 2297 struct dmcu *dmcu = NULL; 2298 2299 dmcu = adev->dm.dc->res_pool->dmcu; 2300 2301 for (i = 0; i < 16; i++) 2302 linear_lut[i] = 0xFFFF * i / 15; 2303 2304 params.set = 0; 2305 params.backlight_ramping_override = false; 2306 params.backlight_ramping_start = 0xCCCC; 2307 params.backlight_ramping_reduction = 0xCCCCCCCC; 2308 params.backlight_lut_array_size = 16; 2309 params.backlight_lut_array = linear_lut; 2310 2311 /* Min backlight level after ABM reduction; don't allow below 1%: 2312 * 0xFFFF x 0.01 = 0x28F 2313 */ 2314 params.min_abm_backlight = 0x28F; 2315 /* In the case where ABM is implemented on dmcub, 2316 * the dmcu object will be NULL. 2317 * ABM 2.4 and up are implemented on dmcub. 2318 */ 2319 if (dmcu) { 2320 if (!dmcu_load_iram(dmcu, params)) 2321 return -EINVAL; 2322 } else if (adev->dm.dc->ctx->dmub_srv) { 2323 struct dc_link *edp_links[MAX_NUM_EDP]; 2324 int edp_num; 2325 2326 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2327 for (i = 0; i < edp_num; i++) { 2328 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2329 return -EINVAL; 2330 } 2331 } 2332 2333 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2334 } 2335 2336 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2337 { 2338 int ret; 2339 u8 guid[16]; 2340 u64 tmp64; 2341 2342 mutex_lock(&mgr->lock); 2343 if (!mgr->mst_primary) 2344 goto out_fail; 2345 2346 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2347 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2348 goto out_fail; 2349 } 2350 2351 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2352 DP_MST_EN | 2353 DP_UP_REQ_EN | 2354 DP_UPSTREAM_IS_SRC); 2355 if (ret < 0) { 2356 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2357 goto out_fail; 2358 } 2359 2360 /* Some hubs forget their GUIDs after they resume */ 2361 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 2362 if (ret != 16) { 2363 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2364 goto out_fail; 2365 } 2366 2367 if (memchr_inv(guid, 0, 16) == NULL) { 2368 tmp64 = get_jiffies_64(); 2369 memcpy(&guid[0], &tmp64, sizeof(u64)); 2370 memcpy(&guid[8], &tmp64, sizeof(u64)); 2371 2372 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); 2373 2374 if (ret != 16) { 2375 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2376 goto out_fail; 2377 } 2378 } 2379 2380 memcpy(mgr->mst_primary->guid, guid, 16); 2381 2382 out_fail: 2383 mutex_unlock(&mgr->lock); 2384 } 2385 2386 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2387 { 2388 struct amdgpu_dm_connector *aconnector; 2389 struct drm_connector *connector; 2390 struct drm_connector_list_iter iter; 2391 struct drm_dp_mst_topology_mgr *mgr; 2392 2393 drm_connector_list_iter_begin(dev, &iter); 2394 drm_for_each_connector_iter(connector, &iter) { 2395 aconnector = to_amdgpu_dm_connector(connector); 2396 if (aconnector->dc_link->type != dc_connection_mst_branch || 2397 aconnector->mst_root) 2398 continue; 2399 2400 mgr = &aconnector->mst_mgr; 2401 2402 if 
(suspend) { 2403 drm_dp_mst_topology_mgr_suspend(mgr); 2404 } else { 2405 /* If the extended timeout is supported in hardware, 2406 * default to the LTTPR timeout (3.2ms) first as a W/A for the DP link layer 2407 * CTS 4.2.1.1 regression introduced by the CTS specs requirement update. 2408 */ 2409 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); 2410 if (!dp_is_lttpr_present(aconnector->dc_link)) 2411 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); 2412 2413 /* TODO: move resume_mst_branch_status() into drm mst resume again 2414 * once topology probing work is pulled out of mst resume into a 2415 * second mst resume step. That second step should be called after 2416 * the old state has been restored (i.e. drm_atomic_helper_resume()). 2417 */ 2418 resume_mst_branch_status(mgr); 2419 } 2420 } 2421 drm_connector_list_iter_end(&iter); 2422 } 2423 2424 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2425 { 2426 int ret = 0; 2427 2428 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends 2429 * on the Windows driver dc implementation. 2430 * For Navi1x, clock settings of dcn watermarks are fixed; the settings 2431 * should be passed to smu during boot up and resume from S3. 2432 * boot up: dc calculates dcn watermark clock settings within dc_create, 2433 * dcn20_resource_construct 2434 * then calls the pplib functions below to pass the settings to smu: 2435 * smu_set_watermarks_for_clock_ranges 2436 * smu_set_watermarks_table 2437 * navi10_set_watermarks_table 2438 * smu_write_watermarks_table 2439 * 2440 * For Renoir, clock settings of dcn watermarks are also fixed values. 2441 * dc has implemented a different flow for the Windows driver: 2442 * dc_hardware_init / dc_set_power_state 2443 * dcn10_init_hw 2444 * notify_wm_ranges 2445 * set_wm_ranges 2446 * -- Linux 2447 * smu_set_watermarks_for_clock_ranges 2448 * renoir_set_watermarks_table 2449 * smu_write_watermarks_table 2450 * 2451 * For Linux, 2452 * dc_hardware_init -> amdgpu_dm_init 2453 * dc_set_power_state --> dm_resume 2454 * 2455 * Therefore, this function applies to navi10/12/14 but not Renoir. 2456 * 2457 */ 2458 switch (adev->ip_versions[DCE_HWIP][0]) { 2459 case IP_VERSION(2, 0, 2): 2460 case IP_VERSION(2, 0, 0): 2461 break; 2462 default: 2463 return 0; 2464 } 2465 2466 ret = amdgpu_dpm_write_watermarks_table(adev); 2467 if (ret) { 2468 DRM_ERROR("Failed to update WMTABLE!\n"); 2469 return ret; 2470 } 2471 2472 return 0; 2473 } 2474 2475 /** 2476 * dm_hw_init() - Initialize DC device 2477 * @handle: The base driver device containing the amdgpu_dm device. 2478 * 2479 * Initialize the &struct amdgpu_display_manager device. This involves calling 2480 * the initializers of each DM component, then populating the struct with them. 2481 * 2482 * Although the function implies hardware initialization, both hardware and 2483 * software are initialized here. Splitting them out to their relevant init 2484 * hooks is a future TODO item. 
2485 * 2486 * Some notable things that are initialized here: 2487 * 2488 * - Display Core, both software and hardware 2489 * - DC modules that we need (freesync and color management) 2490 * - DRM software states 2491 * - Interrupt sources and handlers 2492 * - Vblank support 2493 * - Debug FS entries, if enabled 2494 */ 2495 static int dm_hw_init(void *handle) 2496 { 2497 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2498 /* Create DAL display manager */ 2499 amdgpu_dm_init(adev); 2500 amdgpu_dm_hpd_init(adev); 2501 2502 return 0; 2503 } 2504 2505 /** 2506 * dm_hw_fini() - Teardown DC device 2507 * @handle: The base driver device containing the amdgpu_dm device. 2508 * 2509 * Teardown components within &struct amdgpu_display_manager that require 2510 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2511 * were loaded. Also flush IRQ workqueues and disable them. 2512 */ 2513 static int dm_hw_fini(void *handle) 2514 { 2515 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2516 2517 amdgpu_dm_hpd_fini(adev); 2518 2519 amdgpu_dm_irq_fini(adev); 2520 amdgpu_dm_fini(adev); 2521 return 0; 2522 } 2523 2524 2525 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2526 struct dc_state *state, bool enable) 2527 { 2528 enum dc_irq_source irq_source; 2529 struct amdgpu_crtc *acrtc; 2530 int rc = -EBUSY; 2531 int i = 0; 2532 2533 for (i = 0; i < state->stream_count; i++) { 2534 acrtc = get_crtc_by_otg_inst( 2535 adev, state->stream_status[i].primary_otg_inst); 2536 2537 if (acrtc && state->stream_status[i].plane_count != 0) { 2538 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2539 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2540 if (rc) 2541 DRM_WARN("Failed to %s pflip interrupts\n", 2542 enable ? "enable" : "disable"); 2543 2544 if (enable) { 2545 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 2546 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 2547 } else 2548 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 2549 2550 if (rc) 2551 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 2552 2553 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 2554 /* During gpu-reset we disable and then enable vblank irq, so 2555 * don't use amdgpu_irq_get/put() to avoid refcount change. 2556 */ 2557 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 2558 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); 2559 } 2560 } 2561 2562 } 2563 2564 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2565 { 2566 struct dc_state *context = NULL; 2567 enum dc_status res = DC_ERROR_UNEXPECTED; 2568 int i; 2569 struct dc_stream_state *del_streams[MAX_PIPES]; 2570 int del_streams_count = 0; 2571 2572 memset(del_streams, 0, sizeof(del_streams)); 2573 2574 context = dc_create_state(dc); 2575 if (context == NULL) 2576 goto context_alloc_fail; 2577 2578 dc_resource_state_copy_construct_current(dc, context); 2579 2580 /* First remove from context all streams */ 2581 for (i = 0; i < context->stream_count; i++) { 2582 struct dc_stream_state *stream = context->streams[i]; 2583 2584 del_streams[del_streams_count++] = stream; 2585 } 2586 2587 /* Remove all planes for removed streams and then remove the streams */ 2588 for (i = 0; i < del_streams_count; i++) { 2589 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2590 res = DC_FAIL_DETACH_SURFACES; 2591 goto fail; 2592 } 2593 2594 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2595 if (res != DC_OK) 2596 goto fail; 2597 } 2598 2599 res = dc_commit_streams(dc, context->streams, context->stream_count); 2600 2601 fail: 2602 dc_release_state(context); 2603 2604 context_alloc_fail: 2605 return res; 2606 } 2607 2608 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2609 { 2610 int i; 2611 2612 if (dm->hpd_rx_offload_wq) { 2613 for (i = 0; i < dm->dc->caps.max_links; i++) 2614 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2615 } 2616 } 2617 2618 static int dm_suspend(void *handle) 2619 { 2620 struct amdgpu_device *adev = handle; 2621 struct amdgpu_display_manager *dm = &adev->dm; 2622 int ret = 0; 2623 2624 if (amdgpu_in_reset(adev)) { 2625 mutex_lock(&dm->dc_lock); 2626 2627 dc_allow_idle_optimizations(adev->dm.dc, false); 2628 2629 dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2630 2631 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2632 2633 amdgpu_dm_commit_zero_streams(dm->dc); 2634 2635 amdgpu_dm_irq_suspend(adev); 2636 2637 hpd_rx_irq_work_suspend(dm); 2638 2639 return ret; 2640 } 2641 2642 WARN_ON(adev->dm.cached_state); 2643 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 2644 2645 s3_handle_mst(adev_to_drm(adev), true); 2646 2647 amdgpu_dm_irq_suspend(adev); 2648 2649 hpd_rx_irq_work_suspend(dm); 2650 2651 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 2652 2653 return 0; 2654 } 2655 2656 struct amdgpu_dm_connector * 2657 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2658 struct drm_crtc *crtc) 2659 { 2660 u32 i; 2661 struct drm_connector_state *new_con_state; 2662 struct drm_connector *connector; 2663 struct drm_crtc *crtc_from_state; 2664 2665 for_each_new_connector_in_state(state, connector, new_con_state, i) { 2666 crtc_from_state = new_con_state->crtc; 2667 2668 if (crtc_from_state == crtc) 2669 return to_amdgpu_dm_connector(connector); 2670 } 2671 2672 return NULL; 2673 } 2674 2675 static void emulated_link_detect(struct dc_link *link) 2676 { 2677 struct dc_sink_init_data sink_init_data = { 0 }; 2678 struct display_sink_capability sink_caps = { 0 }; 2679 enum dc_edid_status edid_status; 2680 struct dc_context *dc_ctx = link->ctx; 2681 struct dc_sink *sink = NULL; 2682 struct dc_sink *prev_sink = NULL; 2683 2684 link->type = dc_connection_none; 2685 prev_sink = link->local_sink; 2686 2687 if (prev_sink) 2688 dc_sink_release(prev_sink); 2689 2690 switch (link->connector_signal) { 2691 
case SIGNAL_TYPE_HDMI_TYPE_A: { 2692 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2693 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 2694 break; 2695 } 2696 2697 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 2698 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2699 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 2700 break; 2701 } 2702 2703 case SIGNAL_TYPE_DVI_DUAL_LINK: { 2704 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2705 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 2706 break; 2707 } 2708 2709 case SIGNAL_TYPE_LVDS: { 2710 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2711 sink_caps.signal = SIGNAL_TYPE_LVDS; 2712 break; 2713 } 2714 2715 case SIGNAL_TYPE_EDP: { 2716 sink_caps.transaction_type = 2717 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2718 sink_caps.signal = SIGNAL_TYPE_EDP; 2719 break; 2720 } 2721 2722 case SIGNAL_TYPE_DISPLAY_PORT: { 2723 sink_caps.transaction_type = 2724 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2725 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 2726 break; 2727 } 2728 2729 default: 2730 DC_ERROR("Invalid connector type! signal:%d\n", 2731 link->connector_signal); 2732 return; 2733 } 2734 2735 sink_init_data.link = link; 2736 sink_init_data.sink_signal = sink_caps.signal; 2737 2738 sink = dc_sink_create(&sink_init_data); 2739 if (!sink) { 2740 DC_ERROR("Failed to create sink!\n"); 2741 return; 2742 } 2743 2744 /* dc_sink_create returns a new reference */ 2745 link->local_sink = sink; 2746 2747 edid_status = dm_helpers_read_local_edid( 2748 link->ctx, 2749 link, 2750 sink); 2751 2752 if (edid_status != EDID_OK) 2753 DC_ERROR("Failed to read EDID"); 2754 2755 } 2756 2757 static void dm_gpureset_commit_state(struct dc_state *dc_state, 2758 struct amdgpu_display_manager *dm) 2759 { 2760 struct { 2761 struct dc_surface_update surface_updates[MAX_SURFACES]; 2762 struct dc_plane_info plane_infos[MAX_SURFACES]; 2763 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 2764 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 2765 struct dc_stream_update stream_update; 2766 } *bundle; 2767 int k, m; 2768 2769 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 2770 2771 if (!bundle) { 2772 dm_error("Failed to allocate update bundle\n"); 2773 goto cleanup; 2774 } 2775 2776 for (k = 0; k < dc_state->stream_count; k++) { 2777 bundle->stream_update.stream = dc_state->streams[k]; 2778 2779 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 2780 bundle->surface_updates[m].surface = 2781 dc_state->stream_status->plane_states[m]; 2782 bundle->surface_updates[m].surface->force_full_update = 2783 true; 2784 } 2785 2786 update_planes_and_stream_adapter(dm->dc, 2787 UPDATE_TYPE_FULL, 2788 dc_state->stream_status->plane_count, 2789 dc_state->streams[k], 2790 &bundle->stream_update, 2791 bundle->surface_updates); 2792 } 2793 2794 cleanup: 2795 kfree(bundle); 2796 } 2797 2798 static int dm_resume(void *handle) 2799 { 2800 struct amdgpu_device *adev = handle; 2801 struct drm_device *ddev = adev_to_drm(adev); 2802 struct amdgpu_display_manager *dm = &adev->dm; 2803 struct amdgpu_dm_connector *aconnector; 2804 struct drm_connector *connector; 2805 struct drm_connector_list_iter iter; 2806 struct drm_crtc *crtc; 2807 struct drm_crtc_state *new_crtc_state; 2808 struct dm_crtc_state *dm_new_crtc_state; 2809 struct drm_plane *plane; 2810 struct drm_plane_state *new_plane_state; 2811 struct dm_plane_state *dm_new_plane_state; 2812 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 2813 enum dc_connection_type new_connection_type = dc_connection_none; 2814 
struct dc_state *dc_state; 2815 int i, r, j, ret; 2816 bool need_hotplug = false; 2817 2818 if (amdgpu_in_reset(adev)) { 2819 dc_state = dm->cached_dc_state; 2820 2821 /* 2822 * The dc->current_state is backed up into dm->cached_dc_state 2823 * before we commit 0 streams. 2824 * 2825 * DC will clear link encoder assignments on the real state 2826 * but the changes won't propagate over to the copy we made 2827 * before the 0 streams commit. 2828 * 2829 * DC expects that link encoder assignments are *not* valid 2830 * when committing a state, so as a workaround we can copy 2831 * off of the current state. 2832 * 2833 * We lose the previous assignments, but we had already 2834 * committed 0 streams anyway. 2835 */ 2836 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 2837 2838 r = dm_dmub_hw_init(adev); 2839 if (r) 2840 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2841 2842 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2843 dc_resume(dm->dc); 2844 2845 amdgpu_dm_irq_resume_early(adev); 2846 2847 for (i = 0; i < dc_state->stream_count; i++) { 2848 dc_state->streams[i]->mode_changed = true; 2849 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 2850 dc_state->stream_status[i].plane_states[j]->update_flags.raw 2851 = 0xffffffff; 2852 } 2853 } 2854 2855 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2856 amdgpu_dm_outbox_init(adev); 2857 dc_enable_dmub_outbox(adev->dm.dc); 2858 } 2859 2860 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); 2861 2862 dm_gpureset_commit_state(dm->cached_dc_state, dm); 2863 2864 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2865 2866 dc_release_state(dm->cached_dc_state); 2867 dm->cached_dc_state = NULL; 2868 2869 amdgpu_dm_irq_resume_late(adev); 2870 2871 mutex_unlock(&dm->dc_lock); 2872 2873 return 0; 2874 } 2875 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 2876 dc_release_state(dm_state->context); 2877 dm_state->context = dc_create_state(dm->dc); 2878 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2879 dc_resource_state_construct(dm->dc, dm_state->context); 2880 2881 /* Before powering on DC we need to re-initialize DMUB. */ 2882 dm_dmub_hw_resume(adev); 2883 2884 /* Re-enable outbox interrupts for DPIA. 
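 * In short, the outbox is the firmware-to-driver notification ring; it
 * does not survive the DMUB hardware re-init above, so the IRQ plumbing
 * and the dc-side enable have to be redone before any AUX/HPD
 * notifications can be delivered again.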
*/ 2885 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2886 amdgpu_dm_outbox_init(adev); 2887 dc_enable_dmub_outbox(adev->dm.dc); 2888 } 2889 2890 /* power on hardware */ 2891 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2892 2893 /* program HPD filter */ 2894 dc_resume(dm->dc); 2895 2896 /* 2897 * Enable HPD Rx IRQ early; this should be done before setting the mode, 2898 * as short pulse interrupts are used for MST. 2899 */ 2900 amdgpu_dm_irq_resume_early(adev); 2901 2902 /* On resume we need to rewrite the MSTM control bits to enable MST */ 2903 s3_handle_mst(ddev, false); 2904 2905 /* Do detection */ 2906 drm_connector_list_iter_begin(ddev, &iter); 2907 drm_for_each_connector_iter(connector, &iter) { 2908 aconnector = to_amdgpu_dm_connector(connector); 2909 2910 if (!aconnector->dc_link) 2911 continue; 2912 2913 /* 2914 * This is the case when traversing through already-created end-sink 2915 * MST connectors; they should be skipped. 2916 */ 2917 if (aconnector && aconnector->mst_root) 2918 continue; 2919 2920 mutex_lock(&aconnector->hpd_lock); 2921 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 2922 DRM_ERROR("KMS: Failed to detect connector\n"); 2923 2924 if (aconnector->base.force && new_connection_type == dc_connection_none) { 2925 emulated_link_detect(aconnector->dc_link); 2926 } else { 2927 mutex_lock(&dm->dc_lock); 2928 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 2929 mutex_unlock(&dm->dc_lock); 2930 } 2931 2932 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 2933 aconnector->fake_enable = false; 2934 2935 if (aconnector->dc_sink) 2936 dc_sink_release(aconnector->dc_sink); 2937 aconnector->dc_sink = NULL; 2938 amdgpu_dm_update_connector_after_detect(aconnector); 2939 mutex_unlock(&aconnector->hpd_lock); 2940 } 2941 drm_connector_list_iter_end(&iter); 2942 2943 /* Force mode set in atomic commit */ 2944 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 2945 new_crtc_state->active_changed = true; 2946 2947 /* 2948 * atomic_check is expected to create the dc states. We need to release 2949 * them here, since they were duplicated as part of the suspend 2950 * procedure. 
2951 */ 2952 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 2953 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 2954 if (dm_new_crtc_state->stream) { 2955 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 2956 dc_stream_release(dm_new_crtc_state->stream); 2957 dm_new_crtc_state->stream = NULL; 2958 } 2959 } 2960 2961 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 2962 dm_new_plane_state = to_dm_plane_state(new_plane_state); 2963 if (dm_new_plane_state->dc_state) { 2964 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 2965 dc_plane_state_release(dm_new_plane_state->dc_state); 2966 dm_new_plane_state->dc_state = NULL; 2967 } 2968 } 2969 2970 drm_atomic_helper_resume(ddev, dm->cached_state); 2971 2972 dm->cached_state = NULL; 2973 2974 /* Do mst topology probing after resuming the cached state */ 2975 drm_connector_list_iter_begin(ddev, &iter); 2976 drm_for_each_connector_iter(connector, &iter) { 2977 2978 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 2979 continue; 2980 2981 aconnector = to_amdgpu_dm_connector(connector); 2982 if (aconnector->dc_link->type != dc_connection_mst_branch || 2983 aconnector->mst_root) 2984 continue; 2985 2986 ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true); 2987 2988 if (ret < 0) { 2989 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2990 aconnector->dc_link); 2991 need_hotplug = true; 2992 } 2993 } 2994 drm_connector_list_iter_end(&iter); 2995 2996 if (need_hotplug) 2997 drm_kms_helper_hotplug_event(ddev); 2998 2999 amdgpu_dm_irq_resume_late(adev); 3000 3001 amdgpu_dm_smu_write_watermarks_table(adev); 3002 3003 return 0; 3004 } 3005 3006 /** 3007 * DOC: DM Lifecycle 3008 * 3009 * DM (and consequently DC) is registered in the amdgpu base driver as an IP 3010 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 3011 * the base driver's device list to be initialized and torn down accordingly. 3012 * 3013 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 
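 * In this file those hooks are gathered in &amdgpu_dm_funcs below: for
 * example, dm_sw_init()/dm_sw_fini() manage firmware and the DMUB
 * service, while dm_hw_init()/dm_hw_fini() bring the display hardware
 * and the associated DRM state up and down.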
*/ 3014 3015 3016 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3017 .name = "dm", 3018 .early_init = dm_early_init, 3019 .late_init = dm_late_init, 3020 .sw_init = dm_sw_init, 3021 .sw_fini = dm_sw_fini, 3022 .early_fini = amdgpu_dm_early_fini, 3023 .hw_init = dm_hw_init, 3024 .hw_fini = dm_hw_fini, 3025 .suspend = dm_suspend, 3026 .resume = dm_resume, 3027 .is_idle = dm_is_idle, 3028 .wait_for_idle = dm_wait_for_idle, 3029 .check_soft_reset = dm_check_soft_reset, 3030 .soft_reset = dm_soft_reset, 3031 .set_clockgating_state = dm_set_clockgating_state, 3032 .set_powergating_state = dm_set_powergating_state, 3033 }; 3034 3035 const struct amdgpu_ip_block_version dm_ip_block = { 3036 .type = AMD_IP_BLOCK_TYPE_DCE, 3037 .major = 1, 3038 .minor = 0, 3039 .rev = 0, 3040 .funcs = &amdgpu_dm_funcs, 3041 }; 3042 3043 3044 /** 3045 * DOC: atomic 3046 * 3047 * *WIP* 3048 */ 3049 3050 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3051 .fb_create = amdgpu_display_user_framebuffer_create, 3052 .get_format_info = amdgpu_dm_plane_get_format_info, 3053 .atomic_check = amdgpu_dm_atomic_check, 3054 .atomic_commit = drm_atomic_helper_commit, 3055 }; 3056 3057 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3058 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3059 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3060 }; 3061 3062 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3063 { 3064 struct amdgpu_dm_backlight_caps *caps; 3065 struct drm_connector *conn_base; 3066 struct amdgpu_device *adev; 3067 struct drm_luminance_range_info *luminance_range; 3068 3069 if (aconnector->bl_idx == -1 || 3070 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3071 return; 3072 3073 conn_base = &aconnector->base; 3074 adev = drm_to_adev(conn_base->dev); 3075 3076 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3077 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3078 caps->aux_support = false; 3079 3080 if (caps->ext_caps->bits.oled == 1 3081 /* 3082 * || 3083 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3084 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3085 */) 3086 caps->aux_support = true; 3087 3088 if (amdgpu_backlight == 0) 3089 caps->aux_support = false; 3090 else if (amdgpu_backlight == 1) 3091 caps->aux_support = true; 3092 3093 luminance_range = &conn_base->display_info.luminance_range; 3094 3095 if (luminance_range->max_luminance) { 3096 caps->aux_min_input_signal = luminance_range->min_luminance; 3097 caps->aux_max_input_signal = luminance_range->max_luminance; 3098 } else { 3099 caps->aux_min_input_signal = 0; 3100 caps->aux_max_input_signal = 512; 3101 } 3102 } 3103 3104 void amdgpu_dm_update_connector_after_detect( 3105 struct amdgpu_dm_connector *aconnector) 3106 { 3107 struct drm_connector *connector = &aconnector->base; 3108 struct drm_device *dev = connector->dev; 3109 struct dc_sink *sink; 3110 3111 /* MST handled by drm_mst framework */ 3112 if (aconnector->mst_mgr.mst_state == true) 3113 return; 3114 3115 sink = aconnector->dc_link->local_sink; 3116 if (sink) 3117 dc_sink_retain(sink); 3118 3119 /* 3120 * The EDID mgmt connector gets its first update only in the mode_valid hook, and the 3121 * connector sink is then set to either a fake or a physical sink depending on link status. 3122 * Skip if already done during boot. 
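 * (A forced connector state, e.g. set via the video= kernel parameter,
 * is what leaves aconnector->base.force set and the emulated EDID sink
 * in use on this path.)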
3123 */ 3124 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3125 && aconnector->dc_em_sink) { 3126 3127 /* 3128 * For S3 resume with headless use eml_sink to fake stream 3129 * because on resume connector->sink is set to NULL 3130 */ 3131 mutex_lock(&dev->mode_config.mutex); 3132 3133 if (sink) { 3134 if (aconnector->dc_sink) { 3135 amdgpu_dm_update_freesync_caps(connector, NULL); 3136 /* 3137 * retain and release below are used to 3138 * bump up refcount for sink because the link doesn't point 3139 * to it anymore after disconnect, so on next crtc to connector 3140 * reshuffle by UMD we will get into unwanted dc_sink release 3141 */ 3142 dc_sink_release(aconnector->dc_sink); 3143 } 3144 aconnector->dc_sink = sink; 3145 dc_sink_retain(aconnector->dc_sink); 3146 amdgpu_dm_update_freesync_caps(connector, 3147 aconnector->edid); 3148 } else { 3149 amdgpu_dm_update_freesync_caps(connector, NULL); 3150 if (!aconnector->dc_sink) { 3151 aconnector->dc_sink = aconnector->dc_em_sink; 3152 dc_sink_retain(aconnector->dc_sink); 3153 } 3154 } 3155 3156 mutex_unlock(&dev->mode_config.mutex); 3157 3158 if (sink) 3159 dc_sink_release(sink); 3160 return; 3161 } 3162 3163 /* 3164 * TODO: temporary guard to look for proper fix 3165 * if this sink is MST sink, we should not do anything 3166 */ 3167 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3168 dc_sink_release(sink); 3169 return; 3170 } 3171 3172 if (aconnector->dc_sink == sink) { 3173 /* 3174 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3175 * Do nothing!! 3176 */ 3177 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 3178 aconnector->connector_id); 3179 if (sink) 3180 dc_sink_release(sink); 3181 return; 3182 } 3183 3184 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3185 aconnector->connector_id, aconnector->dc_sink, sink); 3186 3187 mutex_lock(&dev->mode_config.mutex); 3188 3189 /* 3190 * 1. Update status of the drm connector 3191 * 2. Send an event and let userspace tell us what to do 3192 */ 3193 if (sink) { 3194 /* 3195 * TODO: check if we still need the S3 mode update workaround. 3196 * If yes, put it here. 
3197 */ 3198 if (aconnector->dc_sink) { 3199 amdgpu_dm_update_freesync_caps(connector, NULL); 3200 dc_sink_release(aconnector->dc_sink); 3201 } 3202 3203 aconnector->dc_sink = sink; 3204 dc_sink_retain(aconnector->dc_sink); 3205 if (sink->dc_edid.length == 0) { 3206 aconnector->edid = NULL; 3207 if (aconnector->dc_link->aux_mode) { 3208 drm_dp_cec_unset_edid( 3209 &aconnector->dm_dp_aux.aux); 3210 } 3211 } else { 3212 aconnector->edid = 3213 (struct edid *)sink->dc_edid.raw_edid; 3214 3215 if (aconnector->dc_link->aux_mode) 3216 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, 3217 aconnector->edid); 3218 } 3219 3220 if (!aconnector->timing_requested) { 3221 aconnector->timing_requested = 3222 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); 3223 if (!aconnector->timing_requested) 3224 dm_error("failed to create aconnector->requested_timing\n"); 3225 } 3226 3227 drm_connector_update_edid_property(connector, aconnector->edid); 3228 amdgpu_dm_update_freesync_caps(connector, aconnector->edid); 3229 update_connector_ext_caps(aconnector); 3230 } else { 3231 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3232 amdgpu_dm_update_freesync_caps(connector, NULL); 3233 drm_connector_update_edid_property(connector, NULL); 3234 aconnector->num_modes = 0; 3235 dc_sink_release(aconnector->dc_sink); 3236 aconnector->dc_sink = NULL; 3237 aconnector->edid = NULL; 3238 kfree(aconnector->timing_requested); 3239 aconnector->timing_requested = NULL; 3240 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3241 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3242 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3243 } 3244 3245 mutex_unlock(&dev->mode_config.mutex); 3246 3247 update_subconnector_property(aconnector); 3248 3249 if (sink) 3250 dc_sink_release(sink); 3251 } 3252 3253 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3254 { 3255 struct drm_connector *connector = &aconnector->base; 3256 struct drm_device *dev = connector->dev; 3257 enum dc_connection_type new_connection_type = dc_connection_none; 3258 struct amdgpu_device *adev = drm_to_adev(dev); 3259 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3260 bool ret = false; 3261 3262 if (adev->dm.disable_hpd_irq) 3263 return; 3264 3265 /* 3266 * In case of failure or MST no need to update connector status or notify the OS 3267 * since (for MST case) MST does this in its own context. 
*/ 3269 mutex_lock(&aconnector->hpd_lock); 3270 3271 if (adev->dm.hdcp_workqueue) { 3272 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3273 dm_con_state->update_hdcp = true; 3274 } 3275 if (aconnector->fake_enable) 3276 aconnector->fake_enable = false; 3277 3278 aconnector->timing_changed = false; 3279 3280 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3281 DRM_ERROR("KMS: Failed to detect connector\n"); 3282 3283 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3284 emulated_link_detect(aconnector->dc_link); 3285 3286 drm_modeset_lock_all(dev); 3287 dm_restore_drm_connector_state(dev, connector); 3288 drm_modeset_unlock_all(dev); 3289 3290 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3291 drm_kms_helper_connector_hotplug_event(connector); 3292 } else { 3293 mutex_lock(&adev->dm.dc_lock); 3294 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3295 mutex_unlock(&adev->dm.dc_lock); 3296 if (ret) { 3297 amdgpu_dm_update_connector_after_detect(aconnector); 3298 3299 drm_modeset_lock_all(dev); 3300 dm_restore_drm_connector_state(dev, connector); 3301 drm_modeset_unlock_all(dev); 3302 3303 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3304 drm_kms_helper_connector_hotplug_event(connector); 3305 } 3306 } 3307 mutex_unlock(&aconnector->hpd_lock); 3308 3309 } 3310 3311 static void handle_hpd_irq(void *param) 3312 { 3313 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3314 3315 handle_hpd_irq_helper(aconnector); 3316 3317 } 3318 3319 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3320 union hpd_irq_data hpd_irq_data) 3321 { 3322 struct hpd_rx_irq_offload_work *offload_work = 3323 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3324 3325 if (!offload_work) { 3326 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3327 return; 3328 } 3329 3330 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3331 offload_work->data = hpd_irq_data; 3332 offload_work->offload_wq = offload_wq; 3333 3334 queue_work(offload_wq->wq, &offload_work->work); 3335 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3336 } 3337 3338 static void handle_hpd_rx_irq(void *param) 3339 { 3340 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3341 struct drm_connector *connector = &aconnector->base; 3342 struct drm_device *dev = connector->dev; 3343 struct dc_link *dc_link = aconnector->dc_link; 3344 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3345 bool result = false; 3346 enum dc_connection_type new_connection_type = dc_connection_none; 3347 struct amdgpu_device *adev = drm_to_adev(dev); 3348 union hpd_irq_data hpd_irq_data; 3349 bool link_loss = false; 3350 bool has_left_work = false; 3351 int idx = dc_link->link_index; 3352 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3353 3354 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3355 3356 if (adev->dm.disable_hpd_irq) 3357 return; 3358 3359 /* 3360 * TODO: Temporary mutex to protect against a GPIO conflict with the hpd 3361 * interrupt; after the i2c helper is implemented, this mutex should be 3362 * retired. 
3363 */ 3364 mutex_lock(&aconnector->hpd_lock); 3365 3366 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3367 &link_loss, true, &has_left_work); 3368 3369 if (!has_left_work) 3370 goto out; 3371 3372 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3373 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3374 goto out; 3375 } 3376 3377 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3378 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3379 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3380 bool skip = false; 3381 3382 /* 3383 * DOWN_REP_MSG_RDY is also handled by polling method 3384 * mgr->cbs->poll_hpd_irq() 3385 */ 3386 spin_lock(&offload_wq->offload_lock); 3387 skip = offload_wq->is_handling_mst_msg_rdy_event; 3388 3389 if (!skip) 3390 offload_wq->is_handling_mst_msg_rdy_event = true; 3391 3392 spin_unlock(&offload_wq->offload_lock); 3393 3394 if (!skip) 3395 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3396 3397 goto out; 3398 } 3399 3400 if (link_loss) { 3401 bool skip = false; 3402 3403 spin_lock(&offload_wq->offload_lock); 3404 skip = offload_wq->is_handling_link_loss; 3405 3406 if (!skip) 3407 offload_wq->is_handling_link_loss = true; 3408 3409 spin_unlock(&offload_wq->offload_lock); 3410 3411 if (!skip) 3412 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3413 3414 goto out; 3415 } 3416 } 3417 3418 out: 3419 if (result && !is_mst_root_connector) { 3420 /* Downstream Port status changed. */ 3421 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 3422 DRM_ERROR("KMS: Failed to detect connector\n"); 3423 3424 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3425 emulated_link_detect(dc_link); 3426 3427 if (aconnector->fake_enable) 3428 aconnector->fake_enable = false; 3429 3430 amdgpu_dm_update_connector_after_detect(aconnector); 3431 3432 3433 drm_modeset_lock_all(dev); 3434 dm_restore_drm_connector_state(dev, connector); 3435 drm_modeset_unlock_all(dev); 3436 3437 drm_kms_helper_connector_hotplug_event(connector); 3438 } else { 3439 bool ret = false; 3440 3441 mutex_lock(&adev->dm.dc_lock); 3442 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 3443 mutex_unlock(&adev->dm.dc_lock); 3444 3445 if (ret) { 3446 if (aconnector->fake_enable) 3447 aconnector->fake_enable = false; 3448 3449 amdgpu_dm_update_connector_after_detect(aconnector); 3450 3451 drm_modeset_lock_all(dev); 3452 dm_restore_drm_connector_state(dev, connector); 3453 drm_modeset_unlock_all(dev); 3454 3455 drm_kms_helper_connector_hotplug_event(connector); 3456 } 3457 } 3458 } 3459 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3460 if (adev->dm.hdcp_workqueue) 3461 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3462 } 3463 3464 if (dc_link->type != dc_connection_mst_branch) 3465 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3466 3467 mutex_unlock(&aconnector->hpd_lock); 3468 } 3469 3470 static void register_hpd_handlers(struct amdgpu_device *adev) 3471 { 3472 struct drm_device *dev = adev_to_drm(adev); 3473 struct drm_connector *connector; 3474 struct amdgpu_dm_connector *aconnector; 3475 const struct dc_link *dc_link; 3476 struct dc_interrupt_params int_params = {0}; 3477 3478 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3479 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3480 3481 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 3482 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) 3483 
DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 3484 3485 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) 3486 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 3487 } 3488 3489 list_for_each_entry(connector, 3490 &dev->mode_config.connector_list, head) { 3491 3492 aconnector = to_amdgpu_dm_connector(connector); 3493 dc_link = aconnector->dc_link; 3494 3495 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 3496 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3497 int_params.irq_source = dc_link->irq_source_hpd; 3498 3499 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3500 handle_hpd_irq, 3501 (void *) aconnector); 3502 } 3503 3504 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 3505 3506 /* Also register for DP short pulse (hpd_rx). */ 3507 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3508 int_params.irq_source = dc_link->irq_source_hpd_rx; 3509 3510 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3511 handle_hpd_rx_irq, 3512 (void *) aconnector); 3513 } 3514 } 3515 } 3516 3517 #if defined(CONFIG_DRM_AMD_DC_SI) 3518 /* Register IRQ sources and initialize IRQ callbacks */ 3519 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3520 { 3521 struct dc *dc = adev->dm.dc; 3522 struct common_irq_params *c_irq_params; 3523 struct dc_interrupt_params int_params = {0}; 3524 int r; 3525 int i; 3526 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3527 3528 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3529 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3530 3531 /* 3532 * Actions of amdgpu_irq_add_id(): 3533 * 1. Register a set() function with base driver. 3534 * Base driver will call set() function to enable/disable an 3535 * interrupt in DC hardware. 3536 * 2. Register amdgpu_dm_irq_handler(). 3537 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3538 * coming from DC hardware. 3539 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3540 * for acknowledging and handling. 
3541 */ 3542 3543 /* Use VBLANK interrupt */ 3544 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3545 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); 3546 if (r) { 3547 DRM_ERROR("Failed to add crtc irq id!\n"); 3548 return r; 3549 } 3550 3551 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3552 int_params.irq_source = 3553 dc_interrupt_to_irq_source(dc, i + 1, 0); 3554 3555 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3556 3557 c_irq_params->adev = adev; 3558 c_irq_params->irq_src = int_params.irq_source; 3559 3560 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3561 dm_crtc_high_irq, c_irq_params); 3562 } 3563 3564 /* Use GRPH_PFLIP interrupt */ 3565 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3566 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3567 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3568 if (r) { 3569 DRM_ERROR("Failed to add page flip irq id!\n"); 3570 return r; 3571 } 3572 3573 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3574 int_params.irq_source = 3575 dc_interrupt_to_irq_source(dc, i, 0); 3576 3577 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3578 3579 c_irq_params->adev = adev; 3580 c_irq_params->irq_src = int_params.irq_source; 3581 3582 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3583 dm_pflip_high_irq, c_irq_params); 3584 3585 } 3586 3587 /* HPD */ 3588 r = amdgpu_irq_add_id(adev, client_id, 3589 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3590 if (r) { 3591 DRM_ERROR("Failed to add hpd irq id!\n"); 3592 return r; 3593 } 3594 3595 register_hpd_handlers(adev); 3596 3597 return 0; 3598 } 3599 #endif 3600 3601 /* Register IRQ sources and initialize IRQ callbacks */ 3602 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 3603 { 3604 struct dc *dc = adev->dm.dc; 3605 struct common_irq_params *c_irq_params; 3606 struct dc_interrupt_params int_params = {0}; 3607 int r; 3608 int i; 3609 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3610 3611 if (adev->family >= AMDGPU_FAMILY_AI) 3612 client_id = SOC15_IH_CLIENTID_DCE; 3613 3614 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3615 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3616 3617 /* 3618 * Actions of amdgpu_irq_add_id(): 3619 * 1. Register a set() function with base driver. 3620 * Base driver will call set() function to enable/disable an 3621 * interrupt in DC hardware. 3622 * 2. Register amdgpu_dm_irq_handler(). 3623 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3624 * coming from DC hardware. 3625 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3626 * for acknowledging and handling. 
3627 */ 3628 3629 /* Use VBLANK interrupt */ 3630 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 3631 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 3632 if (r) { 3633 DRM_ERROR("Failed to add crtc irq id!\n"); 3634 return r; 3635 } 3636 3637 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3638 int_params.irq_source = 3639 dc_interrupt_to_irq_source(dc, i, 0); 3640 3641 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3642 3643 c_irq_params->adev = adev; 3644 c_irq_params->irq_src = int_params.irq_source; 3645 3646 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3647 dm_crtc_high_irq, c_irq_params); 3648 } 3649 3650 /* Use VUPDATE interrupt */ 3651 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 3652 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 3653 if (r) { 3654 DRM_ERROR("Failed to add vupdate irq id!\n"); 3655 return r; 3656 } 3657 3658 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3659 int_params.irq_source = 3660 dc_interrupt_to_irq_source(dc, i, 0); 3661 3662 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3663 3664 c_irq_params->adev = adev; 3665 c_irq_params->irq_src = int_params.irq_source; 3666 3667 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3668 dm_vupdate_high_irq, c_irq_params); 3669 } 3670 3671 /* Use GRPH_PFLIP interrupt */ 3672 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3673 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3674 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3675 if (r) { 3676 DRM_ERROR("Failed to add page flip irq id!\n"); 3677 return r; 3678 } 3679 3680 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3681 int_params.irq_source = 3682 dc_interrupt_to_irq_source(dc, i, 0); 3683 3684 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3685 3686 c_irq_params->adev = adev; 3687 c_irq_params->irq_src = int_params.irq_source; 3688 3689 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3690 dm_pflip_high_irq, c_irq_params); 3691 3692 } 3693 3694 /* HPD */ 3695 r = amdgpu_irq_add_id(adev, client_id, 3696 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3697 if (r) { 3698 DRM_ERROR("Failed to add hpd irq id!\n"); 3699 return r; 3700 } 3701 3702 register_hpd_handlers(adev); 3703 3704 return 0; 3705 } 3706 3707 /* Register IRQ sources and initialize IRQ callbacks */ 3708 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 3709 { 3710 struct dc *dc = adev->dm.dc; 3711 struct common_irq_params *c_irq_params; 3712 struct dc_interrupt_params int_params = {0}; 3713 int r; 3714 int i; 3715 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3716 static const unsigned int vrtl_int_srcid[] = { 3717 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 3718 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 3719 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 3720 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 3721 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 3722 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 3723 }; 3724 #endif 3725 3726 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3727 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3728 3729 /* 3730 * Actions of amdgpu_irq_add_id(): 3731 * 1. Register a set() function with base driver. 
3732 * Base driver will call set() function to enable/disable an 3733 * interrupt in DC hardware. 3734 * 2. Register amdgpu_dm_irq_handler(). 3735 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3736 * coming from DC hardware. 3737 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3738 * for acknowledging and handling. 3739 */ 3740 3741 /* Use VSTARTUP interrupt */ 3742 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 3743 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 3744 i++) { 3745 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 3746 3747 if (r) { 3748 DRM_ERROR("Failed to add crtc irq id!\n"); 3749 return r; 3750 } 3751 3752 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3753 int_params.irq_source = 3754 dc_interrupt_to_irq_source(dc, i, 0); 3755 3756 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3757 3758 c_irq_params->adev = adev; 3759 c_irq_params->irq_src = int_params.irq_source; 3760 3761 amdgpu_dm_irq_register_interrupt( 3762 adev, &int_params, dm_crtc_high_irq, c_irq_params); 3763 } 3764 3765 /* Use otg vertical line interrupt */ 3766 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3767 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 3768 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 3769 vrtl_int_srcid[i], &adev->vline0_irq); 3770 3771 if (r) { 3772 DRM_ERROR("Failed to add vline0 irq id!\n"); 3773 return r; 3774 } 3775 3776 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3777 int_params.irq_source = 3778 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 3779 3780 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { 3781 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); 3782 break; 3783 } 3784 3785 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 3786 - DC_IRQ_SOURCE_DC1_VLINE0]; 3787 3788 c_irq_params->adev = adev; 3789 c_irq_params->irq_src = int_params.irq_source; 3790 3791 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3792 dm_dcn_vertical_interrupt0_high_irq, c_irq_params); 3793 } 3794 #endif 3795 3796 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 3797 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 3798 * to trigger at end of each vblank, regardless of state of the lock, 3799 * matching DCE behaviour. 
3800 */ 3801 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 3802 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 3803 i++) { 3804 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 3805 3806 if (r) { 3807 DRM_ERROR("Failed to add vupdate irq id!\n"); 3808 return r; 3809 } 3810 3811 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3812 int_params.irq_source = 3813 dc_interrupt_to_irq_source(dc, i, 0); 3814 3815 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3816 3817 c_irq_params->adev = adev; 3818 c_irq_params->irq_src = int_params.irq_source; 3819 3820 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3821 dm_vupdate_high_irq, c_irq_params); 3822 } 3823 3824 /* Use GRPH_PFLIP interrupt */ 3825 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 3826 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 3827 i++) { 3828 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 3829 if (r) { 3830 DRM_ERROR("Failed to add page flip irq id!\n"); 3831 return r; 3832 } 3833 3834 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3835 int_params.irq_source = 3836 dc_interrupt_to_irq_source(dc, i, 0); 3837 3838 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3839 3840 c_irq_params->adev = adev; 3841 c_irq_params->irq_src = int_params.irq_source; 3842 3843 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3844 dm_pflip_high_irq, c_irq_params); 3845 3846 } 3847 3848 /* HPD */ 3849 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3850 &adev->hpd_irq); 3851 if (r) { 3852 DRM_ERROR("Failed to add hpd irq id!\n"); 3853 return r; 3854 } 3855 3856 register_hpd_handlers(adev); 3857 3858 return 0; 3859 } 3860 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3861 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3862 { 3863 struct dc *dc = adev->dm.dc; 3864 struct common_irq_params *c_irq_params; 3865 struct dc_interrupt_params int_params = {0}; 3866 int r, i; 3867 3868 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3869 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3870 3871 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3872 &adev->dmub_outbox_irq); 3873 if (r) { 3874 DRM_ERROR("Failed to add outbox irq id!\n"); 3875 return r; 3876 } 3877 3878 if (dc->ctx->dmub_srv) { 3879 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3880 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3881 int_params.irq_source = 3882 dc_interrupt_to_irq_source(dc, i, 0); 3883 3884 c_irq_params = &adev->dm.dmub_outbox_params[0]; 3885 3886 c_irq_params->adev = adev; 3887 c_irq_params->irq_src = int_params.irq_source; 3888 3889 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3890 dm_dmub_outbox1_low_irq, c_irq_params); 3891 } 3892 3893 return 0; 3894 } 3895 3896 /* 3897 * Acquires the lock for the atomic state object and returns 3898 * the new atomic state. 3899 * 3900 * This should only be called during atomic check. 
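 *
 * A typical caller (sketch only; the surrounding context is hypothetical)
 * looks like:
 *
 *   struct dm_atomic_state *dm_state = NULL;
 *   int ret = dm_atomic_get_state(state, &dm_state);
 *
 *   if (ret)
 *           return ret;
 *   // dm_state->context is now available to the atomic check code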
3901 */ 3902 int dm_atomic_get_state(struct drm_atomic_state *state, 3903 struct dm_atomic_state **dm_state) 3904 { 3905 struct drm_device *dev = state->dev; 3906 struct amdgpu_device *adev = drm_to_adev(dev); 3907 struct amdgpu_display_manager *dm = &adev->dm; 3908 struct drm_private_state *priv_state; 3909 3910 if (*dm_state) 3911 return 0; 3912 3913 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 3914 if (IS_ERR(priv_state)) 3915 return PTR_ERR(priv_state); 3916 3917 *dm_state = to_dm_atomic_state(priv_state); 3918 3919 return 0; 3920 } 3921 3922 static struct dm_atomic_state * 3923 dm_atomic_get_new_state(struct drm_atomic_state *state) 3924 { 3925 struct drm_device *dev = state->dev; 3926 struct amdgpu_device *adev = drm_to_adev(dev); 3927 struct amdgpu_display_manager *dm = &adev->dm; 3928 struct drm_private_obj *obj; 3929 struct drm_private_state *new_obj_state; 3930 int i; 3931 3932 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 3933 if (obj->funcs == dm->atomic_obj.funcs) 3934 return to_dm_atomic_state(new_obj_state); 3935 } 3936 3937 return NULL; 3938 } 3939 3940 static struct drm_private_state * 3941 dm_atomic_duplicate_state(struct drm_private_obj *obj) 3942 { 3943 struct dm_atomic_state *old_state, *new_state; 3944 3945 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 3946 if (!new_state) 3947 return NULL; 3948 3949 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 3950 3951 old_state = to_dm_atomic_state(obj->state); 3952 3953 if (old_state && old_state->context) 3954 new_state->context = dc_copy_state(old_state->context); 3955 3956 if (!new_state->context) { 3957 kfree(new_state); 3958 return NULL; 3959 } 3960 3961 return &new_state->base; 3962 } 3963 3964 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 3965 struct drm_private_state *state) 3966 { 3967 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 3968 3969 if (dm_state && dm_state->context) 3970 dc_release_state(dm_state->context); 3971 3972 kfree(dm_state); 3973 } 3974 3975 static struct drm_private_state_funcs dm_atomic_state_funcs = { 3976 .atomic_duplicate_state = dm_atomic_duplicate_state, 3977 .atomic_destroy_state = dm_atomic_destroy_state, 3978 }; 3979 3980 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 3981 { 3982 struct dm_atomic_state *state; 3983 int r; 3984 3985 adev->mode_info.mode_config_initialized = true; 3986 3987 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 3988 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 3989 3990 adev_to_drm(adev)->mode_config.max_width = 16384; 3991 adev_to_drm(adev)->mode_config.max_height = 16384; 3992 3993 adev_to_drm(adev)->mode_config.preferred_depth = 24; 3994 if (adev->asic_type == CHIP_HAWAII) 3995 /* disable prefer shadow for now due to hibernation issues */ 3996 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 3997 else 3998 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 3999 /* indicates support for immediate flip */ 4000 adev_to_drm(adev)->mode_config.async_page_flip = true; 4001 4002 state = kzalloc(sizeof(*state), GFP_KERNEL); 4003 if (!state) 4004 return -ENOMEM; 4005 4006 state->context = dc_create_state(adev->dm.dc); 4007 if (!state->context) { 4008 kfree(state); 4009 return -ENOMEM; 4010 } 4011 4012 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 4013 4014 drm_atomic_private_obj_init(adev_to_drm(adev), 4015 &adev->dm.atomic_obj, 4016 &state->base, 4017 
&dm_atomic_state_funcs); 4018 4019 r = amdgpu_display_modeset_create_props(adev); 4020 if (r) { 4021 dc_release_state(state->context); 4022 kfree(state); 4023 return r; 4024 } 4025 4026 r = amdgpu_dm_audio_init(adev); 4027 if (r) { 4028 dc_release_state(state->context); 4029 kfree(state); 4030 return r; 4031 } 4032 4033 return 0; 4034 } 4035 4036 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4037 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4038 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4039 4040 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4041 int bl_idx) 4042 { 4043 #if defined(CONFIG_ACPI) 4044 struct amdgpu_dm_backlight_caps caps; 4045 4046 memset(&caps, 0, sizeof(caps)); 4047 4048 if (dm->backlight_caps[bl_idx].caps_valid) 4049 return; 4050 4051 amdgpu_acpi_get_backlight_caps(&caps); 4052 if (caps.caps_valid) { 4053 dm->backlight_caps[bl_idx].caps_valid = true; 4054 if (caps.aux_support) 4055 return; 4056 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 4057 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 4058 } else { 4059 dm->backlight_caps[bl_idx].min_input_signal = 4060 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4061 dm->backlight_caps[bl_idx].max_input_signal = 4062 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4063 } 4064 #else 4065 if (dm->backlight_caps[bl_idx].aux_support) 4066 return; 4067 4068 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4069 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4070 #endif 4071 } 4072 4073 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4074 unsigned int *min, unsigned int *max) 4075 { 4076 if (!caps) 4077 return 0; 4078 4079 if (caps->aux_support) { 4080 // Firmware limits are in nits, DC API wants millinits. 4081 *max = 1000 * caps->aux_max_input_signal; 4082 *min = 1000 * caps->aux_min_input_signal; 4083 } else { 4084 // Firmware limits are 8-bit, PWM control is 16-bit. 
4085 *max = 0x101 * caps->max_input_signal; 4086 *min = 0x101 * caps->min_input_signal; 4087 } 4088 return 1; 4089 } 4090 4091 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 4092 uint32_t brightness) 4093 { 4094 unsigned int min, max; 4095 4096 if (!get_brightness_range(caps, &min, &max)) 4097 return brightness; 4098 4099 // Rescale 0..255 to min..max 4100 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 4101 AMDGPU_MAX_BL_LEVEL); 4102 } 4103 4104 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 4105 uint32_t brightness) 4106 { 4107 unsigned int min, max; 4108 4109 if (!get_brightness_range(caps, &min, &max)) 4110 return brightness; 4111 4112 if (brightness < min) 4113 return 0; 4114 // Rescale min..max to 0..255 4115 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 4116 max - min); 4117 } 4118 4119 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 4120 int bl_idx, 4121 u32 user_brightness) 4122 { 4123 struct amdgpu_dm_backlight_caps caps; 4124 struct dc_link *link; 4125 u32 brightness; 4126 bool rc; 4127 4128 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4129 caps = dm->backlight_caps[bl_idx]; 4130 4131 dm->brightness[bl_idx] = user_brightness; 4132 /* update scratch register */ 4133 if (bl_idx == 0) 4134 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4135 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 4136 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4137 4138 /* Change brightness based on AUX property */ 4139 if (caps.aux_support) { 4140 rc = dc_link_set_backlight_level_nits(link, true, brightness, 4141 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 4142 if (!rc) 4143 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 4144 } else { 4145 rc = dc_link_set_backlight_level(link, brightness, 0); 4146 if (!rc) 4147 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 4148 } 4149 4150 if (rc) 4151 dm->actual_brightness[bl_idx] = user_brightness; 4152 } 4153 4154 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 4155 { 4156 struct amdgpu_display_manager *dm = bl_get_data(bd); 4157 int i; 4158 4159 for (i = 0; i < dm->num_of_edps; i++) { 4160 if (bd == dm->backlight_dev[i]) 4161 break; 4162 } 4163 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4164 i = 0; 4165 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4166 4167 return 0; 4168 } 4169 4170 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4171 int bl_idx) 4172 { 4173 int ret; 4174 struct amdgpu_dm_backlight_caps caps; 4175 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4176 4177 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4178 caps = dm->backlight_caps[bl_idx]; 4179 4180 if (caps.aux_support) { 4181 u32 avg, peak; 4182 bool rc; 4183 4184 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4185 if (!rc) 4186 return dm->brightness[bl_idx]; 4187 return convert_brightness_to_user(&caps, avg); 4188 } 4189 4190 ret = dc_link_get_backlight_level(link); 4191 4192 if (ret == DC_ERROR_UNEXPECTED) 4193 return dm->brightness[bl_idx]; 4194 4195 return convert_brightness_to_user(&caps, ret); 4196 } 4197 4198 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4199 { 4200 struct amdgpu_display_manager *dm = bl_get_data(bd); 4201 int i; 4202 4203 for (i = 0; i < dm->num_of_edps; i++) { 4204 if (bd == dm->backlight_dev[i]) 4205 break; 4206 } 
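	/*
	 * Defensive fallback: if bd matched no registered backlight device
	 * and the search ran off the end of the table, use the first eDP
	 * panel rather than indexing out of bounds.
	 */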
4207 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4208 i = 0; 4209 return amdgpu_dm_backlight_get_level(dm, i); 4210 } 4211 4212 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4213 .options = BL_CORE_SUSPENDRESUME, 4214 .get_brightness = amdgpu_dm_backlight_get_brightness, 4215 .update_status = amdgpu_dm_backlight_update_status, 4216 }; 4217 4218 static void 4219 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 4220 { 4221 struct drm_device *drm = aconnector->base.dev; 4222 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 4223 struct backlight_properties props = { 0 }; 4224 char bl_name[16]; 4225 4226 if (aconnector->bl_idx == -1) 4227 return; 4228 4229 if (!acpi_video_backlight_use_native()) { 4230 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 4231 /* Try registering an ACPI video backlight device instead. */ 4232 acpi_video_register_backlight(); 4233 return; 4234 } 4235 4236 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4237 props.brightness = AMDGPU_MAX_BL_LEVEL; 4238 props.type = BACKLIGHT_RAW; 4239 4240 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4241 drm->primary->index + aconnector->bl_idx); 4242 4243 dm->backlight_dev[aconnector->bl_idx] = 4244 backlight_device_register(bl_name, aconnector->base.kdev, dm, 4245 &amdgpu_dm_backlight_ops, &props); 4246 4247 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 4248 DRM_ERROR("DM: Backlight registration failed!\n"); 4249 dm->backlight_dev[aconnector->bl_idx] = NULL; 4250 } else 4251 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4252 } 4253 4254 static int initialize_plane(struct amdgpu_display_manager *dm, 4255 struct amdgpu_mode_info *mode_info, int plane_id, 4256 enum drm_plane_type plane_type, 4257 const struct dc_plane_cap *plane_cap) 4258 { 4259 struct drm_plane *plane; 4260 unsigned long possible_crtcs; 4261 int ret = 0; 4262 4263 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4264 if (!plane) { 4265 DRM_ERROR("KMS: Failed to allocate plane\n"); 4266 return -ENOMEM; 4267 } 4268 plane->type = plane_type; 4269 4270 /* 4271 * HACK: IGT tests expect that the primary plane for a CRTC 4272 * can only have one possible CRTC. Only expose support for 4273 * any CRTC if they're not going to be used as a primary plane 4274 * for a CRTC - like overlay or underlay planes. 
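	 *
	 * For example (values purely illustrative): with max_streams == 4,
	 * primary plane 2 gets possible_crtcs = 1 << 2 = 0x4, i.e. CRTC 2
	 * only, while a plane whose plane_id is >= max_streams gets 0xff
	 * and may be placed on any CRTC.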
4275	 */
4276	possible_crtcs = 1 << plane_id;
4277	if (plane_id >= dm->dc->caps.max_streams)
4278		possible_crtcs = 0xff;
4279
4280	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4281
4282	if (ret) {
4283		DRM_ERROR("KMS: Failed to initialize plane\n");
4284		kfree(plane);
4285		return ret;
4286	}
4287
4288	if (mode_info)
4289		mode_info->planes[plane_id] = plane;
4290
4291	return ret;
4292 }
4293
4294
4295 static void setup_backlight_device(struct amdgpu_display_manager *dm,
4296				   struct amdgpu_dm_connector *aconnector)
4297 {
4298	struct dc_link *link = aconnector->dc_link;
4299	int bl_idx = dm->num_of_edps;
4300
4301	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
4302	    link->type == dc_connection_none)
4303		return;
4304
4305	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
4306		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections; skipping backlight setup for additional eDPs\n");
4307		return;
4308	}
4309
4310	aconnector->bl_idx = bl_idx;
4311
4312	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4313	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
4314	dm->backlight_link[bl_idx] = link;
4315	dm->num_of_edps++;
4316
4317	update_connector_ext_caps(aconnector);
4318 }
4319
4320 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4321
4322 /*
4323  * In this architecture, the association
4324  * connector -> encoder -> crtc
4325  * is not really required. The crtc and connector will hold the
4326  * display_index as an abstraction to use with the DAL component.
4327  *
4328  * Returns 0 on success
4329  */
4330 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4331 {
4332	struct amdgpu_display_manager *dm = &adev->dm;
4333	s32 i;
4334	struct amdgpu_dm_connector *aconnector = NULL;
4335	struct amdgpu_encoder *aencoder = NULL;
4336	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4337	u32 link_cnt;
4338	s32 primary_planes;
4339	enum dc_connection_type new_connection_type = dc_connection_none;
4340	const struct dc_plane_cap *plane;
4341	bool psr_feature_enabled = false;
4342	int max_overlay = dm->dc->caps.max_slave_planes;
4343
4344	dm->display_indexes_num = dm->dc->caps.max_streams;
4345	/* Update the actual number of crtcs used */
4346	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4347
4348	amdgpu_dm_set_irq_funcs(adev);
4349
4350	link_cnt = dm->dc->caps.max_links;
4351	if (amdgpu_dm_mode_config_init(dm->adev)) {
4352		DRM_ERROR("DM: Failed to initialize mode config\n");
4353		return -EINVAL;
4354	}
4355
4356	/* There is one primary plane per CRTC */
4357	primary_planes = dm->dc->caps.max_streams;
4358	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4359
4360	/*
4361	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4362	 * Order is reversed to match iteration order in atomic check.
4363	 */
4364	for (i = (primary_planes - 1); i >= 0; i--) {
4365		plane = &dm->dc->caps.planes[i];
4366
4367		if (initialize_plane(dm, mode_info, i,
4368				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4369			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4370			goto fail;
4371		}
4372	}
4373
4374	/*
4375	 * Initialize overlay planes, index starting after primary planes.
4376	 * These planes have a higher DRM index than the primary planes since
4377	 * they should be considered as having a higher z-order.
4378	 * Order is reversed to match iteration order in atomic check.
4379	 *
4380	 * Only support DCN for now, and only expose one so we don't encourage
4381	 * userspace to use up all the pipes.
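	 *
	 * Concretely (an illustration of the loop below, not a numbering
	 * guarantee): with 4 primary planes, the first DCN universal plane
	 * that supports ARGB8888 is exposed as DRM plane index
	 * primary_planes + i, and the max_overlay counter bounds how many
	 * such overlays are created.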
4382 */ 4383 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4384 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4385 4386 /* Do not create overlay if MPO disabled */ 4387 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 4388 break; 4389 4390 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4391 continue; 4392 4393 if (!plane->pixel_format_support.argb8888) 4394 continue; 4395 4396 if (max_overlay-- == 0) 4397 break; 4398 4399 if (initialize_plane(dm, NULL, primary_planes + i, 4400 DRM_PLANE_TYPE_OVERLAY, plane)) { 4401 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4402 goto fail; 4403 } 4404 } 4405 4406 for (i = 0; i < dm->dc->caps.max_streams; i++) 4407 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4408 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4409 goto fail; 4410 } 4411 4412 /* Use Outbox interrupt */ 4413 switch (adev->ip_versions[DCE_HWIP][0]) { 4414 case IP_VERSION(3, 0, 0): 4415 case IP_VERSION(3, 1, 2): 4416 case IP_VERSION(3, 1, 3): 4417 case IP_VERSION(3, 1, 4): 4418 case IP_VERSION(3, 1, 5): 4419 case IP_VERSION(3, 1, 6): 4420 case IP_VERSION(3, 2, 0): 4421 case IP_VERSION(3, 2, 1): 4422 case IP_VERSION(2, 1, 0): 4423 if (register_outbox_irq_handlers(dm->adev)) { 4424 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4425 goto fail; 4426 } 4427 break; 4428 default: 4429 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4430 adev->ip_versions[DCE_HWIP][0]); 4431 } 4432 4433 /* Determine whether to enable PSR support by default. */ 4434 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4435 switch (adev->ip_versions[DCE_HWIP][0]) { 4436 case IP_VERSION(3, 1, 2): 4437 case IP_VERSION(3, 1, 3): 4438 case IP_VERSION(3, 1, 4): 4439 case IP_VERSION(3, 1, 5): 4440 case IP_VERSION(3, 1, 6): 4441 case IP_VERSION(3, 2, 0): 4442 case IP_VERSION(3, 2, 1): 4443 psr_feature_enabled = true; 4444 break; 4445 default: 4446 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 4447 break; 4448 } 4449 } 4450 4451 /* loops over all connectors on the board */ 4452 for (i = 0; i < link_cnt; i++) { 4453 struct dc_link *link = NULL; 4454 4455 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 4456 DRM_ERROR( 4457 "KMS: Cannot support more than %d display indexes\n", 4458 AMDGPU_DM_MAX_DISPLAY_INDEX); 4459 continue; 4460 } 4461 4462 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 4463 if (!aconnector) 4464 goto fail; 4465 4466 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 4467 if (!aencoder) 4468 goto fail; 4469 4470 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 4471 DRM_ERROR("KMS: Failed to initialize encoder\n"); 4472 goto fail; 4473 } 4474 4475 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 4476 DRM_ERROR("KMS: Failed to initialize connector\n"); 4477 goto fail; 4478 } 4479 4480 link = dc_get_link_at_index(dm->dc, i); 4481 4482 if (dm->hpd_rx_offload_wq) 4483 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector = 4484 aconnector; 4485 4486 if (!dc_link_detect_connection_type(link, &new_connection_type)) 4487 DRM_ERROR("KMS: Failed to detect connector\n"); 4488 4489 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4490 emulated_link_detect(link); 4491 amdgpu_dm_update_connector_after_detect(aconnector); 4492 } else { 4493 bool ret = false; 4494 4495 mutex_lock(&dm->dc_lock); 4496 ret = dc_link_detect(link, DETECT_REASON_BOOT); 4497 mutex_unlock(&dm->dc_lock); 4498 4499 if (ret) { 4500 amdgpu_dm_update_connector_after_detect(aconnector); 4501 setup_backlight_device(dm, aconnector); 4502 4503 if 
(psr_feature_enabled) 4504 amdgpu_dm_set_psr_caps(link); 4505 4506 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when 4507 * PSR is also supported. 4508 */ 4509 if (link->psr_settings.psr_feature_enabled) 4510 adev_to_drm(adev)->vblank_disable_immediate = false; 4511 } 4512 } 4513 amdgpu_set_panel_orientation(&aconnector->base); 4514 } 4515 4516 /* Software is initialized. Now we can register interrupt handlers. */ 4517 switch (adev->asic_type) { 4518 #if defined(CONFIG_DRM_AMD_DC_SI) 4519 case CHIP_TAHITI: 4520 case CHIP_PITCAIRN: 4521 case CHIP_VERDE: 4522 case CHIP_OLAND: 4523 if (dce60_register_irq_handlers(dm->adev)) { 4524 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4525 goto fail; 4526 } 4527 break; 4528 #endif 4529 case CHIP_BONAIRE: 4530 case CHIP_HAWAII: 4531 case CHIP_KAVERI: 4532 case CHIP_KABINI: 4533 case CHIP_MULLINS: 4534 case CHIP_TONGA: 4535 case CHIP_FIJI: 4536 case CHIP_CARRIZO: 4537 case CHIP_STONEY: 4538 case CHIP_POLARIS11: 4539 case CHIP_POLARIS10: 4540 case CHIP_POLARIS12: 4541 case CHIP_VEGAM: 4542 case CHIP_VEGA10: 4543 case CHIP_VEGA12: 4544 case CHIP_VEGA20: 4545 if (dce110_register_irq_handlers(dm->adev)) { 4546 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4547 goto fail; 4548 } 4549 break; 4550 default: 4551 switch (adev->ip_versions[DCE_HWIP][0]) { 4552 case IP_VERSION(1, 0, 0): 4553 case IP_VERSION(1, 0, 1): 4554 case IP_VERSION(2, 0, 2): 4555 case IP_VERSION(2, 0, 3): 4556 case IP_VERSION(2, 0, 0): 4557 case IP_VERSION(2, 1, 0): 4558 case IP_VERSION(3, 0, 0): 4559 case IP_VERSION(3, 0, 2): 4560 case IP_VERSION(3, 0, 3): 4561 case IP_VERSION(3, 0, 1): 4562 case IP_VERSION(3, 1, 2): 4563 case IP_VERSION(3, 1, 3): 4564 case IP_VERSION(3, 1, 4): 4565 case IP_VERSION(3, 1, 5): 4566 case IP_VERSION(3, 1, 6): 4567 case IP_VERSION(3, 2, 0): 4568 case IP_VERSION(3, 2, 1): 4569 if (dcn10_register_irq_handlers(dm->adev)) { 4570 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4571 goto fail; 4572 } 4573 break; 4574 default: 4575 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 4576 adev->ip_versions[DCE_HWIP][0]); 4577 goto fail; 4578 } 4579 break; 4580 } 4581 4582 return 0; 4583 fail: 4584 kfree(aencoder); 4585 kfree(aconnector); 4586 4587 return -EINVAL; 4588 } 4589 4590 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 4591 { 4592 drm_atomic_private_obj_fini(&dm->atomic_obj); 4593 } 4594 4595 /****************************************************************************** 4596 * amdgpu_display_funcs functions 4597 *****************************************************************************/ 4598 4599 /* 4600 * dm_bandwidth_update - program display watermarks 4601 * 4602 * @adev: amdgpu_device pointer 4603 * 4604 * Calculate and program the display watermarks and line buffer allocation. 4605 */ 4606 static void dm_bandwidth_update(struct amdgpu_device *adev) 4607 { 4608 /* TODO: implement later */ 4609 } 4610 4611 static const struct amdgpu_display_funcs dm_display_funcs = { 4612 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 4613 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 4614 .backlight_set_level = NULL, /* never called for DC */ 4615 .backlight_get_level = NULL, /* never called for DC */ 4616 .hpd_sense = NULL,/* called unconditionally */ 4617 .hpd_set_polarity = NULL, /* called unconditionally */ 4618 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. 
*/ 4619 .page_flip_get_scanoutpos = 4620 dm_crtc_get_scanoutpos,/* called unconditionally */ 4621 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 4622 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 4623 }; 4624 4625 #if defined(CONFIG_DEBUG_KERNEL_DC) 4626 4627 static ssize_t s3_debug_store(struct device *device, 4628 struct device_attribute *attr, 4629 const char *buf, 4630 size_t count) 4631 { 4632 int ret; 4633 int s3_state; 4634 struct drm_device *drm_dev = dev_get_drvdata(device); 4635 struct amdgpu_device *adev = drm_to_adev(drm_dev); 4636 4637 ret = kstrtoint(buf, 0, &s3_state); 4638 4639 if (ret == 0) { 4640 if (s3_state) { 4641 dm_resume(adev); 4642 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 4643 } else 4644 dm_suspend(adev); 4645 } 4646 4647 return ret == 0 ? count : 0; 4648 } 4649 4650 DEVICE_ATTR_WO(s3_debug); 4651 4652 #endif 4653 4654 static int dm_init_microcode(struct amdgpu_device *adev) 4655 { 4656 char *fw_name_dmub; 4657 int r; 4658 4659 switch (adev->ip_versions[DCE_HWIP][0]) { 4660 case IP_VERSION(2, 1, 0): 4661 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 4662 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 4663 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 4664 break; 4665 case IP_VERSION(3, 0, 0): 4666 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) 4667 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 4668 else 4669 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 4670 break; 4671 case IP_VERSION(3, 0, 1): 4672 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 4673 break; 4674 case IP_VERSION(3, 0, 2): 4675 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 4676 break; 4677 case IP_VERSION(3, 0, 3): 4678 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 4679 break; 4680 case IP_VERSION(3, 1, 2): 4681 case IP_VERSION(3, 1, 3): 4682 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 4683 break; 4684 case IP_VERSION(3, 1, 4): 4685 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 4686 break; 4687 case IP_VERSION(3, 1, 5): 4688 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 4689 break; 4690 case IP_VERSION(3, 1, 6): 4691 fw_name_dmub = FIRMWARE_DCN316_DMUB; 4692 break; 4693 case IP_VERSION(3, 2, 0): 4694 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 4695 break; 4696 case IP_VERSION(3, 2, 1): 4697 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 4698 break; 4699 default: 4700 /* ASIC doesn't support DMUB. 
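		 * Returning 0 below is deliberate: on these ASICs DM simply
		 * runs without DMUB firmware, and only an actual firmware
		 * load failure (further down) is reported as an error.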
*/ 4701 return 0; 4702 } 4703 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); 4704 if (r) 4705 DRM_ERROR("DMUB firmware loading failed: %d\n", r); 4706 return r; 4707 } 4708 4709 static int dm_early_init(void *handle) 4710 { 4711 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4712 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4713 struct atom_context *ctx = mode_info->atom_context; 4714 int index = GetIndexIntoMasterTable(DATA, Object_Header); 4715 u16 data_offset; 4716 4717 /* if there is no object header, skip DM */ 4718 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 4719 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 4720 dev_info(adev->dev, "No object header, skipping DM\n"); 4721 return -ENOENT; 4722 } 4723 4724 switch (adev->asic_type) { 4725 #if defined(CONFIG_DRM_AMD_DC_SI) 4726 case CHIP_TAHITI: 4727 case CHIP_PITCAIRN: 4728 case CHIP_VERDE: 4729 adev->mode_info.num_crtc = 6; 4730 adev->mode_info.num_hpd = 6; 4731 adev->mode_info.num_dig = 6; 4732 break; 4733 case CHIP_OLAND: 4734 adev->mode_info.num_crtc = 2; 4735 adev->mode_info.num_hpd = 2; 4736 adev->mode_info.num_dig = 2; 4737 break; 4738 #endif 4739 case CHIP_BONAIRE: 4740 case CHIP_HAWAII: 4741 adev->mode_info.num_crtc = 6; 4742 adev->mode_info.num_hpd = 6; 4743 adev->mode_info.num_dig = 6; 4744 break; 4745 case CHIP_KAVERI: 4746 adev->mode_info.num_crtc = 4; 4747 adev->mode_info.num_hpd = 6; 4748 adev->mode_info.num_dig = 7; 4749 break; 4750 case CHIP_KABINI: 4751 case CHIP_MULLINS: 4752 adev->mode_info.num_crtc = 2; 4753 adev->mode_info.num_hpd = 6; 4754 adev->mode_info.num_dig = 6; 4755 break; 4756 case CHIP_FIJI: 4757 case CHIP_TONGA: 4758 adev->mode_info.num_crtc = 6; 4759 adev->mode_info.num_hpd = 6; 4760 adev->mode_info.num_dig = 7; 4761 break; 4762 case CHIP_CARRIZO: 4763 adev->mode_info.num_crtc = 3; 4764 adev->mode_info.num_hpd = 6; 4765 adev->mode_info.num_dig = 9; 4766 break; 4767 case CHIP_STONEY: 4768 adev->mode_info.num_crtc = 2; 4769 adev->mode_info.num_hpd = 6; 4770 adev->mode_info.num_dig = 9; 4771 break; 4772 case CHIP_POLARIS11: 4773 case CHIP_POLARIS12: 4774 adev->mode_info.num_crtc = 5; 4775 adev->mode_info.num_hpd = 5; 4776 adev->mode_info.num_dig = 5; 4777 break; 4778 case CHIP_POLARIS10: 4779 case CHIP_VEGAM: 4780 adev->mode_info.num_crtc = 6; 4781 adev->mode_info.num_hpd = 6; 4782 adev->mode_info.num_dig = 6; 4783 break; 4784 case CHIP_VEGA10: 4785 case CHIP_VEGA12: 4786 case CHIP_VEGA20: 4787 adev->mode_info.num_crtc = 6; 4788 adev->mode_info.num_hpd = 6; 4789 adev->mode_info.num_dig = 6; 4790 break; 4791 default: 4792 4793 switch (adev->ip_versions[DCE_HWIP][0]) { 4794 case IP_VERSION(2, 0, 2): 4795 case IP_VERSION(3, 0, 0): 4796 adev->mode_info.num_crtc = 6; 4797 adev->mode_info.num_hpd = 6; 4798 adev->mode_info.num_dig = 6; 4799 break; 4800 case IP_VERSION(2, 0, 0): 4801 case IP_VERSION(3, 0, 2): 4802 adev->mode_info.num_crtc = 5; 4803 adev->mode_info.num_hpd = 5; 4804 adev->mode_info.num_dig = 5; 4805 break; 4806 case IP_VERSION(2, 0, 3): 4807 case IP_VERSION(3, 0, 3): 4808 adev->mode_info.num_crtc = 2; 4809 adev->mode_info.num_hpd = 2; 4810 adev->mode_info.num_dig = 2; 4811 break; 4812 case IP_VERSION(1, 0, 0): 4813 case IP_VERSION(1, 0, 1): 4814 case IP_VERSION(3, 0, 1): 4815 case IP_VERSION(2, 1, 0): 4816 case IP_VERSION(3, 1, 2): 4817 case IP_VERSION(3, 1, 3): 4818 case IP_VERSION(3, 1, 4): 4819 case IP_VERSION(3, 1, 5): 4820 case IP_VERSION(3, 1, 6): 4821 case IP_VERSION(3, 2, 0): 4822 case IP_VERSION(3, 2, 1): 
4823 adev->mode_info.num_crtc = 4; 4824 adev->mode_info.num_hpd = 4; 4825 adev->mode_info.num_dig = 4; 4826 break; 4827 default: 4828 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 4829 adev->ip_versions[DCE_HWIP][0]); 4830 return -EINVAL; 4831 } 4832 break; 4833 } 4834 4835 if (adev->mode_info.funcs == NULL) 4836 adev->mode_info.funcs = &dm_display_funcs; 4837 4838 /* 4839 * Note: Do NOT change adev->audio_endpt_rreg and 4840 * adev->audio_endpt_wreg because they are initialised in 4841 * amdgpu_device_init() 4842 */ 4843 #if defined(CONFIG_DEBUG_KERNEL_DC) 4844 device_create_file( 4845 adev_to_drm(adev)->dev, 4846 &dev_attr_s3_debug); 4847 #endif 4848 adev->dc_enabled = true; 4849 4850 return dm_init_microcode(adev); 4851 } 4852 4853 static bool modereset_required(struct drm_crtc_state *crtc_state) 4854 { 4855 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4856 } 4857 4858 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4859 { 4860 drm_encoder_cleanup(encoder); 4861 kfree(encoder); 4862 } 4863 4864 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4865 .destroy = amdgpu_dm_encoder_destroy, 4866 }; 4867 4868 static int 4869 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 4870 const enum surface_pixel_format format, 4871 enum dc_color_space *color_space) 4872 { 4873 bool full_range; 4874 4875 *color_space = COLOR_SPACE_SRGB; 4876 4877 /* DRM color properties only affect non-RGB formats. */ 4878 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 4879 return 0; 4880 4881 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 4882 4883 switch (plane_state->color_encoding) { 4884 case DRM_COLOR_YCBCR_BT601: 4885 if (full_range) 4886 *color_space = COLOR_SPACE_YCBCR601; 4887 else 4888 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 4889 break; 4890 4891 case DRM_COLOR_YCBCR_BT709: 4892 if (full_range) 4893 *color_space = COLOR_SPACE_YCBCR709; 4894 else 4895 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 4896 break; 4897 4898 case DRM_COLOR_YCBCR_BT2020: 4899 if (full_range) 4900 *color_space = COLOR_SPACE_2020_YCBCR; 4901 else 4902 return -EINVAL; 4903 break; 4904 4905 default: 4906 return -EINVAL; 4907 } 4908 4909 return 0; 4910 } 4911 4912 static int 4913 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 4914 const struct drm_plane_state *plane_state, 4915 const u64 tiling_flags, 4916 struct dc_plane_info *plane_info, 4917 struct dc_plane_address *address, 4918 bool tmz_surface, 4919 bool force_disable_dcc) 4920 { 4921 const struct drm_framebuffer *fb = plane_state->fb; 4922 const struct amdgpu_framebuffer *afb = 4923 to_amdgpu_framebuffer(plane_state->fb); 4924 int ret; 4925 4926 memset(plane_info, 0, sizeof(*plane_info)); 4927 4928 switch (fb->format->format) { 4929 case DRM_FORMAT_C8: 4930 plane_info->format = 4931 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 4932 break; 4933 case DRM_FORMAT_RGB565: 4934 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 4935 break; 4936 case DRM_FORMAT_XRGB8888: 4937 case DRM_FORMAT_ARGB8888: 4938 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 4939 break; 4940 case DRM_FORMAT_XRGB2101010: 4941 case DRM_FORMAT_ARGB2101010: 4942 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 4943 break; 4944 case DRM_FORMAT_XBGR2101010: 4945 case DRM_FORMAT_ABGR2101010: 4946 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 4947 break; 4948 case DRM_FORMAT_XBGR8888: 4949 case DRM_FORMAT_ABGR8888: 4950 plane_info->format = 
SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 4951 break; 4952 case DRM_FORMAT_NV21: 4953 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 4954 break; 4955 case DRM_FORMAT_NV12: 4956 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 4957 break; 4958 case DRM_FORMAT_P010: 4959 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 4960 break; 4961 case DRM_FORMAT_XRGB16161616F: 4962 case DRM_FORMAT_ARGB16161616F: 4963 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 4964 break; 4965 case DRM_FORMAT_XBGR16161616F: 4966 case DRM_FORMAT_ABGR16161616F: 4967 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 4968 break; 4969 case DRM_FORMAT_XRGB16161616: 4970 case DRM_FORMAT_ARGB16161616: 4971 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 4972 break; 4973 case DRM_FORMAT_XBGR16161616: 4974 case DRM_FORMAT_ABGR16161616: 4975 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 4976 break; 4977 default: 4978 DRM_ERROR( 4979 "Unsupported screen format %p4cc\n", 4980 &fb->format->format); 4981 return -EINVAL; 4982 } 4983 4984 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 4985 case DRM_MODE_ROTATE_0: 4986 plane_info->rotation = ROTATION_ANGLE_0; 4987 break; 4988 case DRM_MODE_ROTATE_90: 4989 plane_info->rotation = ROTATION_ANGLE_90; 4990 break; 4991 case DRM_MODE_ROTATE_180: 4992 plane_info->rotation = ROTATION_ANGLE_180; 4993 break; 4994 case DRM_MODE_ROTATE_270: 4995 plane_info->rotation = ROTATION_ANGLE_270; 4996 break; 4997 default: 4998 plane_info->rotation = ROTATION_ANGLE_0; 4999 break; 5000 } 5001 5002 5003 plane_info->visible = true; 5004 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5005 5006 plane_info->layer_index = plane_state->normalized_zpos; 5007 5008 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5009 &plane_info->color_space); 5010 if (ret) 5011 return ret; 5012 5013 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5014 plane_info->rotation, tiling_flags, 5015 &plane_info->tiling_info, 5016 &plane_info->plane_size, 5017 &plane_info->dcc, address, 5018 tmz_surface, force_disable_dcc); 5019 if (ret) 5020 return ret; 5021 5022 amdgpu_dm_plane_fill_blending_from_plane_state( 5023 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5024 &plane_info->global_alpha, &plane_info->global_alpha_value); 5025 5026 return 0; 5027 } 5028 5029 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5030 struct dc_plane_state *dc_plane_state, 5031 struct drm_plane_state *plane_state, 5032 struct drm_crtc_state *crtc_state) 5033 { 5034 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5035 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5036 struct dc_scaling_info scaling_info; 5037 struct dc_plane_info plane_info; 5038 int ret; 5039 bool force_disable_dcc = false; 5040 5041 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5042 if (ret) 5043 return ret; 5044 5045 dc_plane_state->src_rect = scaling_info.src_rect; 5046 dc_plane_state->dst_rect = scaling_info.dst_rect; 5047 dc_plane_state->clip_rect = scaling_info.clip_rect; 5048 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5049 5050 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5051 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5052 afb->tiling_flags, 5053 &plane_info, 5054 &dc_plane_state->address, 5055 afb->tmz_surface, 5056 force_disable_dcc); 5057 if 
(ret)
5058		return ret;
5059
5060	dc_plane_state->format = plane_info.format;
5061	dc_plane_state->color_space = plane_info.color_space;
5063	dc_plane_state->plane_size = plane_info.plane_size;
5064	dc_plane_state->rotation = plane_info.rotation;
5065	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5066	dc_plane_state->stereo_format = plane_info.stereo_format;
5067	dc_plane_state->tiling_info = plane_info.tiling_info;
5068	dc_plane_state->visible = plane_info.visible;
5069	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5070	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5071	dc_plane_state->global_alpha = plane_info.global_alpha;
5072	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5073	dc_plane_state->dcc = plane_info.dcc;
5074	dc_plane_state->layer_index = plane_info.layer_index;
5075	dc_plane_state->flip_int_enabled = true;
5076
5077	/*
5078	 * Always set input transfer function, since plane state is refreshed
5079	 * every time.
5080	 */
5081	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5082	if (ret)
5083		return ret;
5084
5085	return 0;
5086 }
5087
5088 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
5089				      struct rect *dirty_rect, int32_t x,
5090				      s32 y, s32 width, s32 height,
5091				      int *i, bool ffu)
5092 {
5093	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
5094
5095	dirty_rect->x = x;
5096	dirty_rect->y = y;
5097	dirty_rect->width = width;
5098	dirty_rect->height = height;
5099
5100	if (ffu)
5101		drm_dbg(plane->dev,
5102			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5103			plane->base.id, width, height);
5104	else
5105		drm_dbg(plane->dev,
5106			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
5107			plane->base.id, x, y, width, height);
5108
5109	(*i)++;
5110 }
5111
5112 /**
5113  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5114  *
5115  * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5116  *         remote fb
5117  * @old_plane_state: Old state of @plane
5118  * @new_plane_state: New state of @plane
5119  * @crtc_state: New state of CRTC connected to the @plane
5120  * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5121  * @dirty_regions_changed: dirty regions changed
5122  *
5123  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5124  * (referred to as "damage clips" in DRM nomenclature) that require updating on
5125  * the eDP remote buffer. The responsibility of specifying the dirty regions is
5126  * amdgpu_dm's.
5127  *
5128  * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5129  * plane with regions that require flushing to the eDP remote buffer. In
5130  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5131  * implicitly provide damage clips without any client support via the plane
5132  * bounds.
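 *
 * Illustrative example (hypothetical values): a client that redrew a
 * 32x32 region at (100, 50) would attach a FB_DAMAGE_CLIPS entry of
 * {.x1 = 100, .y1 = 50, .x2 = 132, .y2 = 82}; the loops below convert
 * it via fill_dc_dirty_rect() into a &struct rect of
 * {.x = 100, .y = 50, .width = 32, .height = 32} for DMUB.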
5133  */
5134 static void fill_dc_dirty_rects(struct drm_plane *plane,
5135				struct drm_plane_state *old_plane_state,
5136				struct drm_plane_state *new_plane_state,
5137				struct drm_crtc_state *crtc_state,
5138				struct dc_flip_addrs *flip_addrs,
5139				bool *dirty_regions_changed)
5140 {
5141	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5142	struct rect *dirty_rects = flip_addrs->dirty_rects;
5143	u32 num_clips;
5144	struct drm_mode_rect *clips;
5145	bool bb_changed;
5146	bool fb_changed;
5147	u32 i = 0;
5148	*dirty_regions_changed = false;
5149
5150	/*
5151	 * Cursor plane has its own dirty rect update interface. See
5152	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5153	 */
5154	if (plane->type == DRM_PLANE_TYPE_CURSOR)
5155		return;
5156
5157	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
5158		goto ffu;
5159
5160	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5161	clips = drm_plane_get_damage_clips(new_plane_state);
5162
5163	if (!dm_crtc_state->mpo_requested) {
5164		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5165			goto ffu;
5166
5167		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5168			fill_dc_dirty_rect(new_plane_state->plane,
5169					   &dirty_rects[flip_addrs->dirty_rect_count],
5170					   clips->x1, clips->y1,
5171					   clips->x2 - clips->x1, clips->y2 - clips->y1,
5172					   &flip_addrs->dirty_rect_count,
5173					   false);
5174		return;
5175	}
5176
5177	/*
5178	 * MPO is requested. Add entire plane bounding box to dirty rects if
5179	 * flipped to or damaged.
5180	 *
5181	 * If plane is moved or resized, also add old bounding box to dirty
5182	 * rects.
5183	 */
5184	fb_changed = old_plane_state->fb->base.id !=
5185		     new_plane_state->fb->base.id;
5186	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5187		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
5188		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
5189		      old_plane_state->crtc_h != new_plane_state->crtc_h);
5190
5191	drm_dbg(plane->dev,
5192		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5193		new_plane_state->plane->base.id,
5194		bb_changed, fb_changed, num_clips);
5195
5196	*dirty_regions_changed = bb_changed;
5197
5198	if ((num_clips + (bb_changed ?
2 : 0)) > DC_MAX_DIRTY_RECTS) 5199 goto ffu; 5200 5201 if (bb_changed) { 5202 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5203 new_plane_state->crtc_x, 5204 new_plane_state->crtc_y, 5205 new_plane_state->crtc_w, 5206 new_plane_state->crtc_h, &i, false); 5207 5208 /* Add old plane bounding-box if plane is moved or resized */ 5209 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5210 old_plane_state->crtc_x, 5211 old_plane_state->crtc_y, 5212 old_plane_state->crtc_w, 5213 old_plane_state->crtc_h, &i, false); 5214 } 5215 5216 if (num_clips) { 5217 for (; i < num_clips; clips++) 5218 fill_dc_dirty_rect(new_plane_state->plane, 5219 &dirty_rects[i], clips->x1, 5220 clips->y1, clips->x2 - clips->x1, 5221 clips->y2 - clips->y1, &i, false); 5222 } else if (fb_changed && !bb_changed) { 5223 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5224 new_plane_state->crtc_x, 5225 new_plane_state->crtc_y, 5226 new_plane_state->crtc_w, 5227 new_plane_state->crtc_h, &i, false); 5228 } 5229 5230 flip_addrs->dirty_rect_count = i; 5231 return; 5232 5233 ffu: 5234 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 5235 dm_crtc_state->base.mode.crtc_hdisplay, 5236 dm_crtc_state->base.mode.crtc_vdisplay, 5237 &flip_addrs->dirty_rect_count, true); 5238 } 5239 5240 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5241 const struct dm_connector_state *dm_state, 5242 struct dc_stream_state *stream) 5243 { 5244 enum amdgpu_rmx_type rmx_type; 5245 5246 struct rect src = { 0 }; /* viewport in composition space*/ 5247 struct rect dst = { 0 }; /* stream addressable area */ 5248 5249 /* no mode. nothing to be done */ 5250 if (!mode) 5251 return; 5252 5253 /* Full screen scaling by default */ 5254 src.width = mode->hdisplay; 5255 src.height = mode->vdisplay; 5256 dst.width = stream->timing.h_addressable; 5257 dst.height = stream->timing.v_addressable; 5258 5259 if (dm_state) { 5260 rmx_type = dm_state->scaling; 5261 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5262 if (src.width * dst.height < 5263 src.height * dst.width) { 5264 /* height needs less upscaling/more downscaling */ 5265 dst.width = src.width * 5266 dst.height / src.height; 5267 } else { 5268 /* width needs less upscaling/more downscaling */ 5269 dst.height = src.height * 5270 dst.width / src.width; 5271 } 5272 } else if (rmx_type == RMX_CENTER) { 5273 dst = src; 5274 } 5275 5276 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5277 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5278 5279 if (dm_state->underscan_enable) { 5280 dst.x += dm_state->underscan_hborder / 2; 5281 dst.y += dm_state->underscan_vborder / 2; 5282 dst.width -= dm_state->underscan_hborder; 5283 dst.height -= dm_state->underscan_vborder; 5284 } 5285 } 5286 5287 stream->src = src; 5288 stream->dst = dst; 5289 5290 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5291 dst.x, dst.y, dst.width, dst.height); 5292 5293 } 5294 5295 static enum dc_color_depth 5296 convert_color_depth_from_display_info(const struct drm_connector *connector, 5297 bool is_y420, int requested_bpc) 5298 { 5299 u8 bpc; 5300 5301 if (is_y420) { 5302 bpc = 8; 5303 5304 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5305 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5306 bpc = 16; 5307 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5308 bpc = 12; 5309 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5310 bpc = 
10;
5311	} else {
5312		bpc = (uint8_t)connector->display_info.bpc;
5313		/* Assume 8 bpc by default if no bpc is specified. */
5314		bpc = bpc ? bpc : 8;
5315	}
5316
5317	if (requested_bpc > 0) {
5318		/*
5319		 * Cap display bpc based on the user requested value.
5320		 *
5321		 * The value for state->max_bpc may not be correctly updated
5322		 * depending on when the connector gets added to the state
5323		 * or if this was called outside of atomic check, so it
5324		 * can't be used directly.
5325		 */
5326		bpc = min_t(u8, bpc, requested_bpc);
5327
5328		/* Round down to the nearest even number. */
5329		bpc = bpc - (bpc & 1);
5330	}
5331
5332	switch (bpc) {
5333	case 0:
5334		/*
5335		 * Temporary workaround: DRM doesn't parse color depth for
5336		 * EDID revisions before 1.4.
5337		 * TODO: Fix edid parsing
5338		 */
5339		return COLOR_DEPTH_888;
5340	case 6:
5341		return COLOR_DEPTH_666;
5342	case 8:
5343		return COLOR_DEPTH_888;
5344	case 10:
5345		return COLOR_DEPTH_101010;
5346	case 12:
5347		return COLOR_DEPTH_121212;
5348	case 14:
5349		return COLOR_DEPTH_141414;
5350	case 16:
5351		return COLOR_DEPTH_161616;
5352	default:
5353		return COLOR_DEPTH_UNDEFINED;
5354	}
5355 }
5356
5357 static enum dc_aspect_ratio
5358 get_aspect_ratio(const struct drm_display_mode *mode_in)
5359 {
5360	/* 1-1 mapping, since both enums follow the HDMI spec. */
5361	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5362 }
5363
5364 static enum dc_color_space
5365 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
5366		       const struct drm_connector_state *connector_state)
5367 {
5368	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5369
5370	switch (connector_state->colorspace) {
5371	case DRM_MODE_COLORIMETRY_BT601_YCC:
5372		if (dc_crtc_timing->flags.Y_ONLY)
5373			color_space = COLOR_SPACE_YCBCR601_LIMITED;
5374		else
5375			color_space = COLOR_SPACE_YCBCR601;
5376		break;
5377	case DRM_MODE_COLORIMETRY_BT709_YCC:
5378		if (dc_crtc_timing->flags.Y_ONLY)
5379			color_space = COLOR_SPACE_YCBCR709_LIMITED;
5380		else
5381			color_space = COLOR_SPACE_YCBCR709;
5382		break;
5383	case DRM_MODE_COLORIMETRY_OPRGB:
5384		color_space = COLOR_SPACE_ADOBERGB;
5385		break;
5386	case DRM_MODE_COLORIMETRY_BT2020_RGB:
5387	case DRM_MODE_COLORIMETRY_BT2020_YCC:
5388		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
5389			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
5390		else
5391			color_space = COLOR_SPACE_2020_YCBCR;
5392		break;
5393	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
5394	default:
5395		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
5396			color_space = COLOR_SPACE_SRGB;
5397		/*
5398		 * 27.03MHz is the separation point between HDTV and SDTV
5399		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5400		 * respectively.
5401		 */
5402		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
5403			if (dc_crtc_timing->flags.Y_ONLY)
5404				color_space =
5405					COLOR_SPACE_YCBCR709_LIMITED;
5406			else
5407				color_space = COLOR_SPACE_YCBCR709;
5408		} else {
5409			if (dc_crtc_timing->flags.Y_ONLY)
5410				color_space =
5411					COLOR_SPACE_YCBCR601_LIMITED;
5412			else
5413				color_space = COLOR_SPACE_YCBCR601;
5414		}
5415		break;
5416	}
5417
5418	return color_space;
5419 }
5420
5421 static bool adjust_colour_depth_from_display_info(
5422	struct dc_crtc_timing *timing_out,
5423	const struct drm_display_info *info)
5424 {
5425	enum dc_color_depth depth = timing_out->display_color_depth;
5426	int normalized_clk;
5427
5428	do {
5429		normalized_clk = timing_out->pix_clk_100hz / 10;
5430		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5431		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5432			normalized_clk /= 2;
5433		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
5434		switch (depth) {
5435		case COLOR_DEPTH_888:
5436			break;
5437		case COLOR_DEPTH_101010:
5438			normalized_clk = (normalized_clk * 30) / 24;
5439			break;
5440		case COLOR_DEPTH_121212:
5441			normalized_clk = (normalized_clk * 36) / 24;
5442			break;
5443		case COLOR_DEPTH_161616:
5444			normalized_clk = (normalized_clk * 48) / 24;
5445			break;
5446		default:
5447			/* The above depths are the only ones valid for HDMI. */
5448			return false;
5449		}
5450		if (normalized_clk <= info->max_tmds_clock) {
5451			timing_out->display_color_depth = depth;
5452			return true;
5453		}
5454	} while (--depth > COLOR_DEPTH_666);
5455	return false;
5456 }
5457
5458 static void fill_stream_properties_from_drm_display_mode(
5459	struct dc_stream_state *stream,
5460	const struct drm_display_mode *mode_in,
5461	const struct drm_connector *connector,
5462	const struct drm_connector_state *connector_state,
5463	const struct dc_stream_state *old_stream,
5464	int requested_bpc)
5465 {
5466	struct dc_crtc_timing *timing_out = &stream->timing;
5467	const struct drm_display_info *info = &connector->display_info;
5468	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5469	struct hdmi_vendor_infoframe hv_frame;
5470	struct hdmi_avi_infoframe avi_frame;
5471
5472	memset(&hv_frame, 0, sizeof(hv_frame));
5473	memset(&avi_frame, 0, sizeof(avi_frame));
5474
5475	timing_out->h_border_left = 0;
5476	timing_out->h_border_right = 0;
5477	timing_out->v_border_top = 0;
5478	timing_out->v_border_bottom = 0;
5479	/* TODO: un-hardcode */
5480	if (drm_mode_is_420_only(info, mode_in)
5481			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5482		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5483	else if (drm_mode_is_420_also(info, mode_in)
5484			&& aconnector->force_yuv420_output)
5485		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5486	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5487			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5488		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5489	else
5490		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5491
5492	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5493	timing_out->display_color_depth = convert_color_depth_from_display_info(
5494		connector,
5495		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5496		requested_bpc);
5497	timing_out->scan_type = SCANNING_TYPE_NODATA;
5498	timing_out->hdmi_vic = 0;
5499
5500	if (old_stream) {
5501		timing_out->vic = old_stream->timing.vic;
5502		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5503		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5504	} else {
5505		timing_out->vic = drm_match_cea_mode(mode_in);
5506		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5507			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5508		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5509			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5510	}
5511
5512	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5513		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5514		timing_out->vic = avi_frame.video_code;
5515		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5516		timing_out->hdmi_vic = hv_frame.vic;
5517	}
5518
5519	if (is_freesync_video_mode(mode_in, aconnector)) {
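		/*
		 * Descriptive note on the two branches below: freesync video
		 * modes program the stream from the mode's base timing
		 * fields (hdisplay, htotal, ...), while all other modes use
		 * the crtc_* fields, which may carry adjusted timings.
		 */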
timing_out->h_addressable = mode_in->hdisplay; 5521 timing_out->h_total = mode_in->htotal; 5522 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 5523 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 5524 timing_out->v_total = mode_in->vtotal; 5525 timing_out->v_addressable = mode_in->vdisplay; 5526 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 5527 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 5528 timing_out->pix_clk_100hz = mode_in->clock * 10; 5529 } else { 5530 timing_out->h_addressable = mode_in->crtc_hdisplay; 5531 timing_out->h_total = mode_in->crtc_htotal; 5532 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 5533 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 5534 timing_out->v_total = mode_in->crtc_vtotal; 5535 timing_out->v_addressable = mode_in->crtc_vdisplay; 5536 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 5537 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 5538 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 5539 } 5540 5541 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 5542 5543 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 5544 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 5545 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5546 if (!adjust_colour_depth_from_display_info(timing_out, info) && 5547 drm_mode_is_420_also(info, mode_in) && 5548 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 5549 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5550 adjust_colour_depth_from_display_info(timing_out, info); 5551 } 5552 } 5553 5554 stream->output_color_space = get_output_color_space(timing_out, connector_state); 5555 } 5556 5557 static void fill_audio_info(struct audio_info *audio_info, 5558 const struct drm_connector *drm_connector, 5559 const struct dc_sink *dc_sink) 5560 { 5561 int i = 0; 5562 int cea_revision = 0; 5563 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 5564 5565 audio_info->manufacture_id = edid_caps->manufacturer_id; 5566 audio_info->product_id = edid_caps->product_id; 5567 5568 cea_revision = drm_connector->display_info.cea_rev; 5569 5570 strscpy(audio_info->display_name, 5571 edid_caps->display_name, 5572 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 5573 5574 if (cea_revision >= 3) { 5575 audio_info->mode_count = edid_caps->audio_mode_count; 5576 5577 for (i = 0; i < audio_info->mode_count; ++i) { 5578 audio_info->modes[i].format_code = 5579 (enum audio_format_code) 5580 (edid_caps->audio_modes[i].format_code); 5581 audio_info->modes[i].channel_count = 5582 edid_caps->audio_modes[i].channel_count; 5583 audio_info->modes[i].sample_rates.all = 5584 edid_caps->audio_modes[i].sample_rate; 5585 audio_info->modes[i].sample_size = 5586 edid_caps->audio_modes[i].sample_size; 5587 } 5588 } 5589 5590 audio_info->flags.all = edid_caps->speaker_flags; 5591 5592 /* TODO: We only check for the progressive mode, check for interlace mode too */ 5593 if (drm_connector->latency_present[0]) { 5594 audio_info->video_latency = drm_connector->video_latency[0]; 5595 audio_info->audio_latency = drm_connector->audio_latency[0]; 5596 } 5597 5598 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 5599 5600 } 5601 5602 static void 5603 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 5604 struct drm_display_mode *dst_mode) 5605 { 5606 
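	/*
	 * Copy only the hardware (crtc_*) timing fields; the logical
	 * hdisplay/vdisplay and friends of dst_mode are left untouched.
	 */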
dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 5607 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 5608 dst_mode->crtc_clock = src_mode->crtc_clock; 5609 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 5610 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 5611 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 5612 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 5613 dst_mode->crtc_htotal = src_mode->crtc_htotal; 5614 dst_mode->crtc_hskew = src_mode->crtc_hskew; 5615 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 5616 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; 5617 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 5618 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 5619 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 5620 } 5621 5622 static void 5623 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 5624 const struct drm_display_mode *native_mode, 5625 bool scale_enabled) 5626 { 5627 if (scale_enabled) { 5628 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 5629 } else if (native_mode->clock == drm_mode->clock && 5630 native_mode->htotal == drm_mode->htotal && 5631 native_mode->vtotal == drm_mode->vtotal) { 5632 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 5633 } else { 5634 /* no scaling nor amdgpu inserted, no need to patch */ 5635 } 5636 } 5637 5638 static struct dc_sink * 5639 create_fake_sink(struct amdgpu_dm_connector *aconnector) 5640 { 5641 struct dc_sink_init_data sink_init_data = { 0 }; 5642 struct dc_sink *sink = NULL; 5643 5644 sink_init_data.link = aconnector->dc_link; 5645 sink_init_data.sink_signal = aconnector->dc_link->connector_signal; 5646 5647 sink = dc_sink_create(&sink_init_data); 5648 if (!sink) { 5649 DRM_ERROR("Failed to create sink!\n"); 5650 return NULL; 5651 } 5652 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 5653 5654 return sink; 5655 } 5656 5657 static void set_multisync_trigger_params( 5658 struct dc_stream_state *stream) 5659 { 5660 struct dc_stream_state *master = NULL; 5661 5662 if (stream->triggered_crtc_reset.enabled) { 5663 master = stream->triggered_crtc_reset.event_source; 5664 stream->triggered_crtc_reset.event = 5665 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's modeset list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid blanking during the
 * transition. For example, the video player can switch from 60 Hz to 30 Hz
 * for playing TV/NTSC content when it goes full screen, without causing any
 * display blanking. The same concept applies to a mode setting change.
 */
static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
5760 &aconnector->base.probed_modes : 5761 &aconnector->base.modes; 5762 5763 if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 5764 return NULL; 5765 5766 if (aconnector->freesync_vid_base.clock != 0) 5767 return &aconnector->freesync_vid_base; 5768 5769 /* Find the preferred mode */ 5770 list_for_each_entry(m, list_head, head) { 5771 if (m->type & DRM_MODE_TYPE_PREFERRED) { 5772 m_pref = m; 5773 break; 5774 } 5775 } 5776 5777 if (!m_pref) { 5778 /* Probably an EDID with no preferred mode. Fallback to first entry */ 5779 m_pref = list_first_entry_or_null( 5780 &aconnector->base.modes, struct drm_display_mode, head); 5781 if (!m_pref) { 5782 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 5783 return NULL; 5784 } 5785 } 5786 5787 highest_refresh = drm_mode_vrefresh(m_pref); 5788 5789 /* 5790 * Find the mode with highest refresh rate with same resolution. 5791 * For some monitors, preferred mode is not the mode with highest 5792 * supported refresh rate. 5793 */ 5794 list_for_each_entry(m, list_head, head) { 5795 current_refresh = drm_mode_vrefresh(m); 5796 5797 if (m->hdisplay == m_pref->hdisplay && 5798 m->vdisplay == m_pref->vdisplay && 5799 highest_refresh < current_refresh) { 5800 highest_refresh = current_refresh; 5801 m_pref = m; 5802 } 5803 } 5804 5805 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 5806 return m_pref; 5807 } 5808 5809 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 5810 struct amdgpu_dm_connector *aconnector) 5811 { 5812 struct drm_display_mode *high_mode; 5813 int timing_diff; 5814 5815 high_mode = get_highest_refresh_rate_mode(aconnector, false); 5816 if (!high_mode || !mode) 5817 return false; 5818 5819 timing_diff = high_mode->vtotal - mode->vtotal; 5820 5821 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 5822 high_mode->hdisplay != mode->hdisplay || 5823 high_mode->vdisplay != mode->vdisplay || 5824 high_mode->hsync_start != mode->hsync_start || 5825 high_mode->hsync_end != mode->hsync_end || 5826 high_mode->htotal != mode->htotal || 5827 high_mode->hskew != mode->hskew || 5828 high_mode->vscan != mode->vscan || 5829 high_mode->vsync_start - mode->vsync_start != timing_diff || 5830 high_mode->vsync_end - mode->vsync_end != timing_diff) 5831 return false; 5832 else 5833 return true; 5834 } 5835 5836 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 5837 struct dc_sink *sink, struct dc_stream_state *stream, 5838 struct dsc_dec_dpcd_caps *dsc_caps) 5839 { 5840 stream->timing.flags.DSC = 0; 5841 dsc_caps->is_dsc_supported = false; 5842 5843 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 5844 sink->sink_signal == SIGNAL_TYPE_EDP)) { 5845 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 5846 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 5847 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 5848 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 5849 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 5850 dsc_caps); 5851 } 5852 } 5853 5854 5855 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 5856 struct dc_sink *sink, struct dc_stream_state *stream, 5857 struct dsc_dec_dpcd_caps *dsc_caps, 5858 uint32_t max_dsc_target_bpp_limit_override) 5859 { 5860 const struct dc_link_settings *verified_link_cap = NULL; 5861 u32 link_bw_in_kbps; 5862 u32 edp_min_bpp_x16, edp_max_bpp_x16; 5863 struct dc *dc = sink->ctx->dc; 5864 struct dc_dsc_bw_range bw_range = 
{0}; 5865 struct dc_dsc_config dsc_cfg = {0}; 5866 struct dc_dsc_config_options dsc_options = {0}; 5867 5868 dc_dsc_get_default_config_option(dc, &dsc_options); 5869 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 5870 5871 verified_link_cap = dc_link_get_link_cap(stream->link); 5872 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 5873 edp_min_bpp_x16 = 8 * 16; 5874 edp_max_bpp_x16 = 8 * 16; 5875 5876 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 5877 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 5878 5879 if (edp_max_bpp_x16 < edp_min_bpp_x16) 5880 edp_min_bpp_x16 = edp_max_bpp_x16; 5881 5882 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 5883 dc->debug.dsc_min_slice_height_override, 5884 edp_min_bpp_x16, edp_max_bpp_x16, 5885 dsc_caps, 5886 &stream->timing, 5887 dc_link_get_highest_encoding_format(aconnector->dc_link), 5888 &bw_range)) { 5889 5890 if (bw_range.max_kbps < link_bw_in_kbps) { 5891 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 5892 dsc_caps, 5893 &dsc_options, 5894 0, 5895 &stream->timing, 5896 dc_link_get_highest_encoding_format(aconnector->dc_link), 5897 &dsc_cfg)) { 5898 stream->timing.dsc_cfg = dsc_cfg; 5899 stream->timing.flags.DSC = 1; 5900 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 5901 } 5902 return; 5903 } 5904 } 5905 5906 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 5907 dsc_caps, 5908 &dsc_options, 5909 link_bw_in_kbps, 5910 &stream->timing, 5911 dc_link_get_highest_encoding_format(aconnector->dc_link), 5912 &dsc_cfg)) { 5913 stream->timing.dsc_cfg = dsc_cfg; 5914 stream->timing.flags.DSC = 1; 5915 } 5916 } 5917 5918 5919 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 5920 struct dc_sink *sink, struct dc_stream_state *stream, 5921 struct dsc_dec_dpcd_caps *dsc_caps) 5922 { 5923 struct drm_connector *drm_connector = &aconnector->base; 5924 u32 link_bandwidth_kbps; 5925 struct dc *dc = sink->ctx->dc; 5926 u32 max_supported_bw_in_kbps, timing_bw_in_kbps; 5927 u32 dsc_max_supported_bw_in_kbps; 5928 u32 max_dsc_target_bpp_limit_override = 5929 drm_connector->display_info.max_dsc_bpp; 5930 struct dc_dsc_config_options dsc_options = {0}; 5931 5932 dc_dsc_get_default_config_option(dc, &dsc_options); 5933 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; 5934 5935 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 5936 dc_link_get_link_cap(aconnector->dc_link)); 5937 5938 /* Set DSC policy according to dsc_clock_en */ 5939 dc_dsc_policy_set_enable_dsc_when_not_needed( 5940 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 5941 5942 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && 5943 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && 5944 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 5945 5946 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); 5947 5948 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 5949 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 5950 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 5951 dsc_caps, 5952 &dsc_options, 5953 link_bandwidth_kbps, 5954 &stream->timing, 5955 dc_link_get_highest_encoding_format(aconnector->dc_link), 5956 &stream->timing.dsc_cfg)) { 5957 stream->timing.flags.DSC = 1; 5958 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected 
from SST RX\n", __func__, drm_connector->name); 5959 } 5960 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 5961 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, 5962 dc_link_get_highest_encoding_format(aconnector->dc_link)); 5963 max_supported_bw_in_kbps = link_bandwidth_kbps; 5964 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; 5965 5966 if (timing_bw_in_kbps > max_supported_bw_in_kbps && 5967 max_supported_bw_in_kbps > 0 && 5968 dsc_max_supported_bw_in_kbps > 0) 5969 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 5970 dsc_caps, 5971 &dsc_options, 5972 dsc_max_supported_bw_in_kbps, 5973 &stream->timing, 5974 dc_link_get_highest_encoding_format(aconnector->dc_link), 5975 &stream->timing.dsc_cfg)) { 5976 stream->timing.flags.DSC = 1; 5977 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", 5978 __func__, drm_connector->name); 5979 } 5980 } 5981 } 5982 5983 /* Overwrite the stream flag if DSC is enabled through debugfs */ 5984 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 5985 stream->timing.flags.DSC = 1; 5986 5987 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 5988 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 5989 5990 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 5991 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 5992 5993 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 5994 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 5995 } 5996 5997 static struct dc_stream_state * 5998 create_stream_for_sink(struct amdgpu_dm_connector *aconnector, 5999 const struct drm_display_mode *drm_mode, 6000 const struct dm_connector_state *dm_state, 6001 const struct dc_stream_state *old_stream, 6002 int requested_bpc) 6003 { 6004 struct drm_display_mode *preferred_mode = NULL; 6005 struct drm_connector *drm_connector; 6006 const struct drm_connector_state *con_state = &dm_state->base; 6007 struct dc_stream_state *stream = NULL; 6008 struct drm_display_mode mode; 6009 struct drm_display_mode saved_mode; 6010 struct drm_display_mode *freesync_mode = NULL; 6011 bool native_mode_found = false; 6012 bool recalculate_timing = false; 6013 bool scale = dm_state->scaling != RMX_OFF; 6014 int mode_refresh; 6015 int preferred_refresh = 0; 6016 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6017 struct dsc_dec_dpcd_caps dsc_caps; 6018 6019 struct dc_sink *sink = NULL; 6020 6021 drm_mode_init(&mode, drm_mode); 6022 memset(&saved_mode, 0, sizeof(saved_mode)); 6023 6024 if (aconnector == NULL) { 6025 DRM_ERROR("aconnector is NULL!\n"); 6026 return stream; 6027 } 6028 6029 drm_connector = &aconnector->base; 6030 6031 if (!aconnector->dc_sink) { 6032 sink = create_fake_sink(aconnector); 6033 if (!sink) 6034 return stream; 6035 } else { 6036 sink = aconnector->dc_sink; 6037 dc_sink_retain(sink); 6038 } 6039 6040 stream = dc_create_stream_for_sink(sink); 6041 6042 if (stream == NULL) { 6043 DRM_ERROR("Failed to create stream for sink!\n"); 6044 goto finish; 6045 } 6046 6047 stream->dm_stream_context = aconnector; 6048 6049 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6050 drm_connector->display_info.hdmi.scdc.scrambling.low_rates; 6051 6052 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 6053 /* Search for preferred mode */ 6054 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 
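			/* Treat the first mode flagged preferred as native. */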
6055 native_mode_found = true; 6056 break; 6057 } 6058 } 6059 if (!native_mode_found) 6060 preferred_mode = list_first_entry_or_null( 6061 &aconnector->base.modes, 6062 struct drm_display_mode, 6063 head); 6064 6065 mode_refresh = drm_mode_vrefresh(&mode); 6066 6067 if (preferred_mode == NULL) { 6068 /* 6069 * This may not be an error, the use case is when we have no 6070 * usermode calls to reset and set mode upon hotplug. In this 6071 * case, we call set mode ourselves to restore the previous mode 6072 * and the modelist may not be filled in time. 6073 */ 6074 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6075 } else { 6076 recalculate_timing = is_freesync_video_mode(&mode, aconnector); 6077 if (recalculate_timing) { 6078 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6079 drm_mode_copy(&saved_mode, &mode); 6080 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; 6081 drm_mode_copy(&mode, freesync_mode); 6082 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; 6083 } else { 6084 decide_crtc_timing_for_drm_display_mode( 6085 &mode, preferred_mode, scale); 6086 6087 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6088 } 6089 } 6090 6091 if (recalculate_timing) 6092 drm_mode_set_crtcinfo(&saved_mode, 0); 6093 6094 /* 6095 * If scaling is enabled and refresh rate didn't change 6096 * we copy the vic and polarities of the old timings 6097 */ 6098 if (!scale || mode_refresh != preferred_refresh) 6099 fill_stream_properties_from_drm_display_mode( 6100 stream, &mode, &aconnector->base, con_state, NULL, 6101 requested_bpc); 6102 else 6103 fill_stream_properties_from_drm_display_mode( 6104 stream, &mode, &aconnector->base, con_state, old_stream, 6105 requested_bpc); 6106 6107 if (aconnector->timing_changed) { 6108 DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n", 6109 __func__, 6110 stream->timing.display_color_depth, 6111 aconnector->timing_requested->display_color_depth); 6112 stream->timing = *aconnector->timing_requested; 6113 } 6114 6115 /* SST DSC determination policy */ 6116 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6117 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6118 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6119 6120 update_stream_scaling_settings(&mode, dm_state, stream); 6121 6122 fill_audio_info( 6123 &stream->audio_info, 6124 drm_connector, 6125 sink); 6126 6127 update_stream_signal(stream, sink); 6128 6129 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6130 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6131 6132 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || 6133 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || 6134 stream->signal == SIGNAL_TYPE_EDP) { 6135 // 6136 // should decide stream support vsc sdp colorimetry capability 6137 // before building vsc info packet 6138 // 6139 stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && 6140 stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; 6141 6142 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) 6143 tf = TRANSFER_FUNC_GAMMA_22; 6144 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); 6145 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 6146 6147 } 6148 finish: 6149 dc_sink_release(sink); 6150 6151 return stream; 6152 } 6153 6154 static enum drm_connector_status 6155 amdgpu_dm_connector_detect(struct drm_connector *connector, bool 
force) 6156 { 6157 bool connected; 6158 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6159 6160 /* 6161 * Notes: 6162 * 1. This interface is NOT called in context of HPD irq. 6163 * 2. This interface *is called* in context of user-mode ioctl. Which 6164 * makes it a bad place for *any* MST-related activity. 6165 */ 6166 6167 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6168 !aconnector->fake_enable) 6169 connected = (aconnector->dc_sink != NULL); 6170 else 6171 connected = (aconnector->base.force == DRM_FORCE_ON || 6172 aconnector->base.force == DRM_FORCE_ON_DIGITAL); 6173 6174 update_subconnector_property(aconnector); 6175 6176 return (connected ? connector_status_connected : 6177 connector_status_disconnected); 6178 } 6179 6180 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6181 struct drm_connector_state *connector_state, 6182 struct drm_property *property, 6183 uint64_t val) 6184 { 6185 struct drm_device *dev = connector->dev; 6186 struct amdgpu_device *adev = drm_to_adev(dev); 6187 struct dm_connector_state *dm_old_state = 6188 to_dm_connector_state(connector->state); 6189 struct dm_connector_state *dm_new_state = 6190 to_dm_connector_state(connector_state); 6191 6192 int ret = -EINVAL; 6193 6194 if (property == dev->mode_config.scaling_mode_property) { 6195 enum amdgpu_rmx_type rmx_type; 6196 6197 switch (val) { 6198 case DRM_MODE_SCALE_CENTER: 6199 rmx_type = RMX_CENTER; 6200 break; 6201 case DRM_MODE_SCALE_ASPECT: 6202 rmx_type = RMX_ASPECT; 6203 break; 6204 case DRM_MODE_SCALE_FULLSCREEN: 6205 rmx_type = RMX_FULL; 6206 break; 6207 case DRM_MODE_SCALE_NONE: 6208 default: 6209 rmx_type = RMX_OFF; 6210 break; 6211 } 6212 6213 if (dm_old_state->scaling == rmx_type) 6214 return 0; 6215 6216 dm_new_state->scaling = rmx_type; 6217 ret = 0; 6218 } else if (property == adev->mode_info.underscan_hborder_property) { 6219 dm_new_state->underscan_hborder = val; 6220 ret = 0; 6221 } else if (property == adev->mode_info.underscan_vborder_property) { 6222 dm_new_state->underscan_vborder = val; 6223 ret = 0; 6224 } else if (property == adev->mode_info.underscan_property) { 6225 dm_new_state->underscan_enable = val; 6226 ret = 0; 6227 } else if (property == adev->mode_info.abm_level_property) { 6228 dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE; 6229 ret = 0; 6230 } 6231 6232 return ret; 6233 } 6234 6235 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6236 const struct drm_connector_state *state, 6237 struct drm_property *property, 6238 uint64_t *val) 6239 { 6240 struct drm_device *dev = connector->dev; 6241 struct amdgpu_device *adev = drm_to_adev(dev); 6242 struct dm_connector_state *dm_state = 6243 to_dm_connector_state(state); 6244 int ret = -EINVAL; 6245 6246 if (property == dev->mode_config.scaling_mode_property) { 6247 switch (dm_state->scaling) { 6248 case RMX_CENTER: 6249 *val = DRM_MODE_SCALE_CENTER; 6250 break; 6251 case RMX_ASPECT: 6252 *val = DRM_MODE_SCALE_ASPECT; 6253 break; 6254 case RMX_FULL: 6255 *val = DRM_MODE_SCALE_FULLSCREEN; 6256 break; 6257 case RMX_OFF: 6258 default: 6259 *val = DRM_MODE_SCALE_NONE; 6260 break; 6261 } 6262 ret = 0; 6263 } else if (property == adev->mode_info.underscan_hborder_property) { 6264 *val = dm_state->underscan_hborder; 6265 ret = 0; 6266 } else if (property == adev->mode_info.underscan_vborder_property) { 6267 *val = dm_state->underscan_vborder; 6268 ret = 0; 6269 } else if (property == adev->mode_info.underscan_property) { 6270 *val = 
dm_state->underscan_enable; 6271 ret = 0; 6272 } else if (property == adev->mode_info.abm_level_property) { 6273 *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 6274 dm_state->abm_level : 0; 6275 ret = 0; 6276 } 6277 6278 return ret; 6279 } 6280 6281 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 6282 { 6283 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 6284 6285 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 6286 } 6287 6288 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 6289 { 6290 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6291 struct amdgpu_device *adev = drm_to_adev(connector->dev); 6292 struct amdgpu_display_manager *dm = &adev->dm; 6293 6294 /* 6295 * Call only if mst_mgr was initialized before since it's not done 6296 * for all connector types. 6297 */ 6298 if (aconnector->mst_mgr.dev) 6299 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 6300 6301 if (aconnector->bl_idx != -1) { 6302 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); 6303 dm->backlight_dev[aconnector->bl_idx] = NULL; 6304 } 6305 6306 if (aconnector->dc_em_sink) 6307 dc_sink_release(aconnector->dc_em_sink); 6308 aconnector->dc_em_sink = NULL; 6309 if (aconnector->dc_sink) 6310 dc_sink_release(aconnector->dc_sink); 6311 aconnector->dc_sink = NULL; 6312 6313 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 6314 drm_connector_unregister(connector); 6315 drm_connector_cleanup(connector); 6316 if (aconnector->i2c) { 6317 i2c_del_adapter(&aconnector->i2c->base); 6318 kfree(aconnector->i2c); 6319 } 6320 kfree(aconnector->dm_dp_aux.aux.name); 6321 6322 kfree(connector); 6323 } 6324 6325 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 6326 { 6327 struct dm_connector_state *state = 6328 to_dm_connector_state(connector->state); 6329 6330 if (connector->state) 6331 __drm_atomic_helper_connector_destroy_state(connector->state); 6332 6333 kfree(state); 6334 6335 state = kzalloc(sizeof(*state), GFP_KERNEL); 6336 6337 if (state) { 6338 state->scaling = RMX_OFF; 6339 state->underscan_enable = false; 6340 state->underscan_hborder = 0; 6341 state->underscan_vborder = 0; 6342 state->base.max_requested_bpc = 8; 6343 state->vcpi_slots = 0; 6344 state->pbn = 0; 6345 6346 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 6347 state->abm_level = amdgpu_dm_abm_level ?: 6348 ABM_LEVEL_IMMEDIATE_DISABLE; 6349 6350 __drm_atomic_helper_connector_reset(connector, &state->base); 6351 } 6352 } 6353 6354 struct drm_connector_state * 6355 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 6356 { 6357 struct dm_connector_state *state = 6358 to_dm_connector_state(connector->state); 6359 6360 struct dm_connector_state *new_state = 6361 kmemdup(state, sizeof(*state), GFP_KERNEL); 6362 6363 if (!new_state) 6364 return NULL; 6365 6366 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 6367 6368 new_state->freesync_capable = state->freesync_capable; 6369 new_state->abm_level = state->abm_level; 6370 new_state->scaling = state->scaling; 6371 new_state->underscan_enable = state->underscan_enable; 6372 new_state->underscan_hborder = state->underscan_hborder; 6373 new_state->underscan_vborder = state->underscan_vborder; 6374 new_state->vcpi_slots = state->vcpi_slots; 6375 new_state->pbn = state->pbn; 6376 return &new_state->base; 6377 } 6378 6379 static int 6380 
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	amdgpu_dm_register_backlight_device(amdgpu_dm_connector);

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dc_link *dc_link = aconnector->dc_link;
	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
	struct edid *edid;

	if (!connector->edid_override)
		return;

	drm_edid_override_connector_update(&aconnector->base);
	edid = aconnector->base.edid_blob_ptr->data;
	aconnector->edid = edid;

	/* Update emulated (virtual) sink's EDID */
	if (dc_em_sink && dc_link) {
		memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
		memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
		dm_helpers_parse_edid_caps(
			dc_link,
			&dc_em_sink->dc_edid,
			&dc_em_sink->edid_caps);
	}
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister,
	.force = amdgpu_dm_connector_funcs_force
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		/*
		 * If connector->edid_override is valid, propagate the
		 * override EDID into edid_blob_ptr.
		 */
		drm_edid_override_connector_update(&aconnector->base);

		if (!aconnector->base.edid_blob_ptr) {
			DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
				  aconnector->base.name);

			aconnector->base.force = DRM_FORCE_OFF;
			return;
		}
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6483 aconnector->dc_link->local_sink : 6484 aconnector->dc_em_sink; 6485 dc_sink_retain(aconnector->dc_sink); 6486 } 6487 } 6488 6489 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 6490 { 6491 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 6492 6493 /* 6494 * In case of headless boot with force on for DP managed connector 6495 * Those settings have to be != 0 to get initial modeset 6496 */ 6497 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6498 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 6499 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 6500 } 6501 6502 create_eml_sink(aconnector); 6503 } 6504 6505 static enum dc_status dm_validate_stream_and_context(struct dc *dc, 6506 struct dc_stream_state *stream) 6507 { 6508 enum dc_status dc_result = DC_ERROR_UNEXPECTED; 6509 struct dc_plane_state *dc_plane_state = NULL; 6510 struct dc_state *dc_state = NULL; 6511 6512 if (!stream) 6513 goto cleanup; 6514 6515 dc_plane_state = dc_create_plane_state(dc); 6516 if (!dc_plane_state) 6517 goto cleanup; 6518 6519 dc_state = dc_create_state(dc); 6520 if (!dc_state) 6521 goto cleanup; 6522 6523 /* populate stream to plane */ 6524 dc_plane_state->src_rect.height = stream->src.height; 6525 dc_plane_state->src_rect.width = stream->src.width; 6526 dc_plane_state->dst_rect.height = stream->src.height; 6527 dc_plane_state->dst_rect.width = stream->src.width; 6528 dc_plane_state->clip_rect.height = stream->src.height; 6529 dc_plane_state->clip_rect.width = stream->src.width; 6530 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; 6531 dc_plane_state->plane_size.surface_size.height = stream->src.height; 6532 dc_plane_state->plane_size.surface_size.width = stream->src.width; 6533 dc_plane_state->plane_size.chroma_size.height = stream->src.height; 6534 dc_plane_state->plane_size.chroma_size.width = stream->src.width; 6535 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 6536 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; 6537 dc_plane_state->rotation = ROTATION_ANGLE_0; 6538 dc_plane_state->is_tiling_rotated = false; 6539 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; 6540 6541 dc_result = dc_validate_stream(dc, stream); 6542 if (dc_result == DC_OK) 6543 dc_result = dc_validate_plane(dc, dc_plane_state); 6544 6545 if (dc_result == DC_OK) 6546 dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); 6547 6548 if (dc_result == DC_OK && !dc_add_plane_to_context( 6549 dc, 6550 stream, 6551 dc_plane_state, 6552 dc_state)) 6553 dc_result = DC_FAIL_ATTACH_SURFACES; 6554 6555 if (dc_result == DC_OK) 6556 dc_result = dc_validate_global_state(dc, dc_state, true); 6557 6558 cleanup: 6559 if (dc_state) 6560 dc_release_state(dc_state); 6561 6562 if (dc_plane_state) 6563 dc_plane_state_release(dc_plane_state); 6564 6565 return dc_result; 6566 } 6567 6568 struct dc_stream_state * 6569 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6570 const struct drm_display_mode *drm_mode, 6571 const struct dm_connector_state *dm_state, 6572 const struct dc_stream_state *old_stream) 6573 { 6574 struct drm_connector *connector = &aconnector->base; 6575 struct amdgpu_device *adev = drm_to_adev(connector->dev); 6576 struct dc_stream_state *stream; 6577 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 6578 int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);
		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

		if (dc_result == DC_OK)
			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);

	stream = create_validate_stream_for_sink(aconnector, mode,
						 to_dm_connector_state(connector->state),
						 NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC.
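	 *
	 * The packed frame from hdmi_drm_infoframe_pack_only() is those 30
	 * bytes: a 4-byte header followed by the 26-byte HDR static metadata
	 * payload. Only the header differs per signal type below: HDMI
	 * carries the InfoFrame checksum in sb[0], while DP wraps the same
	 * payload in an SDP header, which is why the copy offset 'i' differs.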
*/ 6694 switch (state->connector->connector_type) { 6695 case DRM_MODE_CONNECTOR_HDMIA: 6696 out->hb0 = 0x87; /* type */ 6697 out->hb1 = 0x01; /* version */ 6698 out->hb2 = 0x1A; /* length */ 6699 out->sb[0] = buf[3]; /* checksum */ 6700 i = 1; 6701 break; 6702 6703 case DRM_MODE_CONNECTOR_DisplayPort: 6704 case DRM_MODE_CONNECTOR_eDP: 6705 out->hb0 = 0x00; /* sdp id, zero */ 6706 out->hb1 = 0x87; /* type */ 6707 out->hb2 = 0x1D; /* payload len - 1 */ 6708 out->hb3 = (0x13 << 2); /* sdp version */ 6709 out->sb[0] = 0x01; /* version */ 6710 out->sb[1] = 0x1A; /* length */ 6711 i = 2; 6712 break; 6713 6714 default: 6715 return -EINVAL; 6716 } 6717 6718 memcpy(&out->sb[i], &buf[4], 26); 6719 out->valid = true; 6720 6721 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 6722 sizeof(out->sb), false); 6723 6724 return 0; 6725 } 6726 6727 static int 6728 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 6729 struct drm_atomic_state *state) 6730 { 6731 struct drm_connector_state *new_con_state = 6732 drm_atomic_get_new_connector_state(state, conn); 6733 struct drm_connector_state *old_con_state = 6734 drm_atomic_get_old_connector_state(state, conn); 6735 struct drm_crtc *crtc = new_con_state->crtc; 6736 struct drm_crtc_state *new_crtc_state; 6737 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); 6738 int ret; 6739 6740 trace_amdgpu_dm_connector_atomic_check(new_con_state); 6741 6742 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 6743 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); 6744 if (ret < 0) 6745 return ret; 6746 } 6747 6748 if (!crtc) 6749 return 0; 6750 6751 if (new_con_state->colorspace != old_con_state->colorspace) { 6752 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6753 if (IS_ERR(new_crtc_state)) 6754 return PTR_ERR(new_crtc_state); 6755 6756 new_crtc_state->mode_changed = true; 6757 } 6758 6759 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 6760 struct dc_info_packet hdr_infopacket; 6761 6762 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 6763 if (ret) 6764 return ret; 6765 6766 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6767 if (IS_ERR(new_crtc_state)) 6768 return PTR_ERR(new_crtc_state); 6769 6770 /* 6771 * DC considers the stream backends changed if the 6772 * static metadata changes. Forcing the modeset also 6773 * gives a simple way for userspace to switch from 6774 * 8bpc to 10bpc when setting the metadata to enter 6775 * or exit HDR. 6776 * 6777 * Changing the static metadata after it's been 6778 * set is permissible, however. So only force a 6779 * modeset if we're entering or exiting HDR. 6780 */ 6781 new_crtc_state->mode_changed = new_crtc_state->mode_changed || 6782 !old_con_state->hdr_output_metadata || 6783 !new_con_state->hdr_output_metadata; 6784 } 6785 6786 return 0; 6787 } 6788 6789 static const struct drm_connector_helper_funcs 6790 amdgpu_dm_connector_helper_funcs = { 6791 /* 6792 * If hotplugging a second bigger display in FB Con mode, bigger resolution 6793 * modes will be filtered by drm_mode_validate_size(), and those modes 6794 * are missing after user start lightdm. So we need to renew modes list. 
6795 * in get_modes call back, not just return the modes count 6796 */ 6797 .get_modes = get_modes, 6798 .mode_valid = amdgpu_dm_connector_mode_valid, 6799 .atomic_check = amdgpu_dm_connector_atomic_check, 6800 }; 6801 6802 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 6803 { 6804 6805 } 6806 6807 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) 6808 { 6809 switch (display_color_depth) { 6810 case COLOR_DEPTH_666: 6811 return 6; 6812 case COLOR_DEPTH_888: 6813 return 8; 6814 case COLOR_DEPTH_101010: 6815 return 10; 6816 case COLOR_DEPTH_121212: 6817 return 12; 6818 case COLOR_DEPTH_141414: 6819 return 14; 6820 case COLOR_DEPTH_161616: 6821 return 16; 6822 default: 6823 break; 6824 } 6825 return 0; 6826 } 6827 6828 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 6829 struct drm_crtc_state *crtc_state, 6830 struct drm_connector_state *conn_state) 6831 { 6832 struct drm_atomic_state *state = crtc_state->state; 6833 struct drm_connector *connector = conn_state->connector; 6834 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6835 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 6836 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 6837 struct drm_dp_mst_topology_mgr *mst_mgr; 6838 struct drm_dp_mst_port *mst_port; 6839 struct drm_dp_mst_topology_state *mst_state; 6840 enum dc_color_depth color_depth; 6841 int clock, bpp = 0; 6842 bool is_y420 = false; 6843 6844 if (!aconnector->mst_output_port) 6845 return 0; 6846 6847 mst_port = aconnector->mst_output_port; 6848 mst_mgr = &aconnector->mst_root->mst_mgr; 6849 6850 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 6851 return 0; 6852 6853 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); 6854 if (IS_ERR(mst_state)) 6855 return PTR_ERR(mst_state); 6856 6857 mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); 6858 6859 if (!state->duplicated) { 6860 int max_bpc = conn_state->max_requested_bpc; 6861 6862 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 6863 aconnector->force_yuv420_output; 6864 color_depth = convert_color_depth_from_display_info(connector, 6865 is_y420, 6866 max_bpc); 6867 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 6868 clock = adjusted_mode->clock; 6869 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); 6870 } 6871 6872 dm_new_connector_state->vcpi_slots = 6873 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, 6874 dm_new_connector_state->pbn); 6875 if (dm_new_connector_state->vcpi_slots < 0) { 6876 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 6877 return dm_new_connector_state->vcpi_slots; 6878 } 6879 return 0; 6880 } 6881 6882 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 6883 .disable = dm_encoder_helper_disable, 6884 .atomic_check = dm_encoder_helper_atomic_check 6885 }; 6886 6887 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 6888 struct dc_state *dc_state, 6889 struct dsc_mst_fairness_vars *vars) 6890 { 6891 struct dc_stream_state *stream = NULL; 6892 struct drm_connector *connector; 6893 struct drm_connector_state *new_con_state; 6894 struct amdgpu_dm_connector *aconnector; 6895 struct dm_connector_state *dm_conn_state; 6896 int i, j, ret; 6897 int vcpi, pbn_div, pbn, slot_num = 0; 6898 6899 for_each_new_connector_in_state(state, connector, 
new_con_state, i) { 6900 6901 aconnector = to_amdgpu_dm_connector(connector); 6902 6903 if (!aconnector->mst_output_port) 6904 continue; 6905 6906 if (!new_con_state || !new_con_state->crtc) 6907 continue; 6908 6909 dm_conn_state = to_dm_connector_state(new_con_state); 6910 6911 for (j = 0; j < dc_state->stream_count; j++) { 6912 stream = dc_state->streams[j]; 6913 if (!stream) 6914 continue; 6915 6916 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) 6917 break; 6918 6919 stream = NULL; 6920 } 6921 6922 if (!stream) 6923 continue; 6924 6925 pbn_div = dm_mst_get_pbn_divider(stream->link); 6926 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 6927 for (j = 0; j < dc_state->stream_count; j++) { 6928 if (vars[j].aconnector == aconnector) { 6929 pbn = vars[j].pbn; 6930 break; 6931 } 6932 } 6933 6934 if (j == dc_state->stream_count) 6935 continue; 6936 6937 slot_num = DIV_ROUND_UP(pbn, pbn_div); 6938 6939 if (stream->timing.flags.DSC != 1) { 6940 dm_conn_state->pbn = pbn; 6941 dm_conn_state->vcpi_slots = slot_num; 6942 6943 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, 6944 dm_conn_state->pbn, false); 6945 if (ret < 0) 6946 return ret; 6947 6948 continue; 6949 } 6950 6951 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); 6952 if (vcpi < 0) 6953 return vcpi; 6954 6955 dm_conn_state->pbn = pbn; 6956 dm_conn_state->vcpi_slots = vcpi; 6957 } 6958 return 0; 6959 } 6960 6961 static int to_drm_connector_type(enum signal_type st) 6962 { 6963 switch (st) { 6964 case SIGNAL_TYPE_HDMI_TYPE_A: 6965 return DRM_MODE_CONNECTOR_HDMIA; 6966 case SIGNAL_TYPE_EDP: 6967 return DRM_MODE_CONNECTOR_eDP; 6968 case SIGNAL_TYPE_LVDS: 6969 return DRM_MODE_CONNECTOR_LVDS; 6970 case SIGNAL_TYPE_RGB: 6971 return DRM_MODE_CONNECTOR_VGA; 6972 case SIGNAL_TYPE_DISPLAY_PORT: 6973 case SIGNAL_TYPE_DISPLAY_PORT_MST: 6974 return DRM_MODE_CONNECTOR_DisplayPort; 6975 case SIGNAL_TYPE_DVI_DUAL_LINK: 6976 case SIGNAL_TYPE_DVI_SINGLE_LINK: 6977 return DRM_MODE_CONNECTOR_DVID; 6978 case SIGNAL_TYPE_VIRTUAL: 6979 return DRM_MODE_CONNECTOR_VIRTUAL; 6980 6981 default: 6982 return DRM_MODE_CONNECTOR_Unknown; 6983 } 6984 } 6985 6986 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 6987 { 6988 struct drm_encoder *encoder; 6989 6990 /* There is only one encoder per connector */ 6991 drm_connector_for_each_possible_encoder(connector, encoder) 6992 return encoder; 6993 6994 return NULL; 6995 } 6996 6997 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 6998 { 6999 struct drm_encoder *encoder; 7000 struct amdgpu_encoder *amdgpu_encoder; 7001 7002 encoder = amdgpu_dm_connector_to_encoder(connector); 7003 7004 if (encoder == NULL) 7005 return; 7006 7007 amdgpu_encoder = to_amdgpu_encoder(encoder); 7008 7009 amdgpu_encoder->native_mode.clock = 0; 7010 7011 if (!list_empty(&connector->probed_modes)) { 7012 struct drm_display_mode *preferred_mode = NULL; 7013 7014 list_for_each_entry(preferred_mode, 7015 &connector->probed_modes, 7016 head) { 7017 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7018 amdgpu_encoder->native_mode = *preferred_mode; 7019 7020 break; 7021 } 7022 7023 } 7024 } 7025 7026 static struct drm_display_mode * 7027 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7028 char *name, 7029 int hdisplay, int vdisplay) 7030 { 7031 struct drm_device *dev = encoder->dev; 7032 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7033 struct drm_display_mode 
*mode = NULL; 7034 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7035 7036 mode = drm_mode_duplicate(dev, native_mode); 7037 7038 if (mode == NULL) 7039 return NULL; 7040 7041 mode->hdisplay = hdisplay; 7042 mode->vdisplay = vdisplay; 7043 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7044 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7045 7046 return mode; 7047 7048 } 7049 7050 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7051 struct drm_connector *connector) 7052 { 7053 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7054 struct drm_display_mode *mode = NULL; 7055 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7056 struct amdgpu_dm_connector *amdgpu_dm_connector = 7057 to_amdgpu_dm_connector(connector); 7058 int i; 7059 int n; 7060 struct mode_size { 7061 char name[DRM_DISPLAY_MODE_LEN]; 7062 int w; 7063 int h; 7064 } common_modes[] = { 7065 { "640x480", 640, 480}, 7066 { "800x600", 800, 600}, 7067 { "1024x768", 1024, 768}, 7068 { "1280x720", 1280, 720}, 7069 { "1280x800", 1280, 800}, 7070 {"1280x1024", 1280, 1024}, 7071 { "1440x900", 1440, 900}, 7072 {"1680x1050", 1680, 1050}, 7073 {"1600x1200", 1600, 1200}, 7074 {"1920x1080", 1920, 1080}, 7075 {"1920x1200", 1920, 1200} 7076 }; 7077 7078 n = ARRAY_SIZE(common_modes); 7079 7080 for (i = 0; i < n; i++) { 7081 struct drm_display_mode *curmode = NULL; 7082 bool mode_existed = false; 7083 7084 if (common_modes[i].w > native_mode->hdisplay || 7085 common_modes[i].h > native_mode->vdisplay || 7086 (common_modes[i].w == native_mode->hdisplay && 7087 common_modes[i].h == native_mode->vdisplay)) 7088 continue; 7089 7090 list_for_each_entry(curmode, &connector->probed_modes, head) { 7091 if (common_modes[i].w == curmode->hdisplay && 7092 common_modes[i].h == curmode->vdisplay) { 7093 mode_existed = true; 7094 break; 7095 } 7096 } 7097 7098 if (mode_existed) 7099 continue; 7100 7101 mode = amdgpu_dm_create_common_mode(encoder, 7102 common_modes[i].name, common_modes[i].w, 7103 common_modes[i].h); 7104 if (!mode) 7105 continue; 7106 7107 drm_mode_probed_add(connector, mode); 7108 amdgpu_dm_connector->num_modes++; 7109 } 7110 } 7111 7112 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7113 { 7114 struct drm_encoder *encoder; 7115 struct amdgpu_encoder *amdgpu_encoder; 7116 const struct drm_display_mode *native_mode; 7117 7118 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7119 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7120 return; 7121 7122 mutex_lock(&connector->dev->mode_config.mutex); 7123 amdgpu_dm_connector_get_modes(connector); 7124 mutex_unlock(&connector->dev->mode_config.mutex); 7125 7126 encoder = amdgpu_dm_connector_to_encoder(connector); 7127 if (!encoder) 7128 return; 7129 7130 amdgpu_encoder = to_amdgpu_encoder(encoder); 7131 7132 native_mode = &amdgpu_encoder->native_mode; 7133 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 7134 return; 7135 7136 drm_connector_set_panel_orientation_with_quirk(connector, 7137 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7138 native_mode->hdisplay, 7139 native_mode->vdisplay); 7140 } 7141 7142 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7143 struct edid *edid) 7144 { 7145 struct amdgpu_dm_connector *amdgpu_dm_connector = 7146 to_amdgpu_dm_connector(connector); 7147 7148 if (edid) { 7149 /* empty probed_modes */ 7150 INIT_LIST_HEAD(&connector->probed_modes); 7151 amdgpu_dm_connector->num_modes = 7152 
drm_add_edid_modes(connector, edid);

		/* Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode() since an EDID can have
		 * more than one preferred mode. Modes that come later
		 * in the probed mode list could be of higher and
		 * preferred resolution. For example, a 3840x2160
		 * preferred timing in the base EDID and a 4096x2160
		 * preferred resolution in a DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/* Freesync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	u32 new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const u32 common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};

	/*
	 * Find the mode with the highest refresh rate at the same
	 * resolution as the preferred mode. Some monitors report a
	 * preferred mode whose refresh rate is lower than the highest
	 * rate they support.
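	 *
	 * For example, a 1920x1080 mode with a 148500 kHz pixel clock,
	 * htotal 2200 and vtotal 1125 refreshes at 60 Hz; retargeting it
	 * to 50 Hz below gives target_vtotal = 148500 * 1000 * 1000 /
	 * (50000 * 2200) = 1350, i.e. 225 extra front porch lines.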
7216 */ 7217 7218 m = get_highest_refresh_rate_mode(aconnector, true); 7219 if (!m) 7220 return 0; 7221 7222 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 7223 u64 target_vtotal, target_vtotal_diff; 7224 u64 num, den; 7225 7226 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 7227 continue; 7228 7229 if (common_rates[i] < aconnector->min_vfreq * 1000 || 7230 common_rates[i] > aconnector->max_vfreq * 1000) 7231 continue; 7232 7233 num = (unsigned long long)m->clock * 1000 * 1000; 7234 den = common_rates[i] * (unsigned long long)m->htotal; 7235 target_vtotal = div_u64(num, den); 7236 target_vtotal_diff = target_vtotal - m->vtotal; 7237 7238 /* Check for illegal modes */ 7239 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 7240 m->vsync_end + target_vtotal_diff < m->vsync_start || 7241 m->vtotal + target_vtotal_diff < m->vsync_end) 7242 continue; 7243 7244 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 7245 if (!new_mode) 7246 goto out; 7247 7248 new_mode->vtotal += (u16)target_vtotal_diff; 7249 new_mode->vsync_start += (u16)target_vtotal_diff; 7250 new_mode->vsync_end += (u16)target_vtotal_diff; 7251 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7252 new_mode->type |= DRM_MODE_TYPE_DRIVER; 7253 7254 if (!is_duplicate_mode(aconnector, new_mode)) { 7255 drm_mode_probed_add(&aconnector->base, new_mode); 7256 new_modes_count += 1; 7257 } else 7258 drm_mode_destroy(aconnector->base.dev, new_mode); 7259 } 7260 out: 7261 return new_modes_count; 7262 } 7263 7264 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 7265 struct edid *edid) 7266 { 7267 struct amdgpu_dm_connector *amdgpu_dm_connector = 7268 to_amdgpu_dm_connector(connector); 7269 7270 if (!edid) 7271 return; 7272 7273 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 7274 amdgpu_dm_connector->num_modes += 7275 add_fs_modes(amdgpu_dm_connector); 7276 } 7277 7278 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 7279 { 7280 struct amdgpu_dm_connector *amdgpu_dm_connector = 7281 to_amdgpu_dm_connector(connector); 7282 struct drm_encoder *encoder; 7283 struct edid *edid = amdgpu_dm_connector->edid; 7284 struct dc_link_settings *verified_link_cap = 7285 &amdgpu_dm_connector->dc_link->verified_link_cap; 7286 const struct dc *dc = amdgpu_dm_connector->dc_link->dc; 7287 7288 encoder = amdgpu_dm_connector_to_encoder(connector); 7289 7290 if (!drm_edid_is_valid(edid)) { 7291 amdgpu_dm_connector->num_modes = 7292 drm_add_modes_noedid(connector, 640, 480); 7293 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) 7294 amdgpu_dm_connector->num_modes += 7295 drm_add_modes_noedid(connector, 1920, 1080); 7296 } else { 7297 amdgpu_dm_connector_ddc_get_modes(connector, edid); 7298 amdgpu_dm_connector_add_common_modes(encoder, connector); 7299 amdgpu_dm_connector_add_freesync_modes(connector, edid); 7300 } 7301 amdgpu_dm_fbc_init(connector); 7302 7303 return amdgpu_dm_connector->num_modes; 7304 } 7305 7306 static const u32 supported_colorspaces = 7307 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) | 7308 BIT(DRM_MODE_COLORIMETRY_OPRGB) | 7309 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) | 7310 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC); 7311 7312 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 7313 struct amdgpu_dm_connector *aconnector, 7314 int connector_type, 7315 struct dc_link *link, 7316 int link_index) 7317 { 7318 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 7319 7320 /* 7321 * Some of the properties below 
	 * require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->bl_idx = -1;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	aconnector->pack_sdp_v1_3 = false;
	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
	mutex_init(&aconnector->hpd_lock);
	mutex_init(&aconnector->handle_mst_msg_ready);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		link->link_enc = link_enc_cfg_get_link_enc(link);
		ASSERT(link->link_enc);
		if (link->link_enc)
			aconnector->base.ycbcr_420_allowed =
				link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_root)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	aconnector->base.state->max_bpc = 16;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
		if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
			drm_connector_attach_colorspace_property(&aconnector->base);
	} else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
		   connector_type == DRM_MODE_CONNECTOR_eDP) {
		if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
			drm_connector_attach_colorspace_property(&aconnector->base);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);

		if (!aconnector->mst_root)
drm_connector_attach_vrr_capable_property(&aconnector->base); 7409 7410 if (adev->dm.hdcp_workqueue) 7411 drm_connector_attach_content_protection_property(&aconnector->base, true); 7412 } 7413 } 7414 7415 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 7416 struct i2c_msg *msgs, int num) 7417 { 7418 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 7419 struct ddc_service *ddc_service = i2c->ddc_service; 7420 struct i2c_command cmd; 7421 int i; 7422 int result = -EIO; 7423 7424 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) 7425 return result; 7426 7427 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 7428 7429 if (!cmd.payloads) 7430 return result; 7431 7432 cmd.number_of_payloads = num; 7433 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 7434 cmd.speed = 100; 7435 7436 for (i = 0; i < num; i++) { 7437 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 7438 cmd.payloads[i].address = msgs[i].addr; 7439 cmd.payloads[i].length = msgs[i].len; 7440 cmd.payloads[i].data = msgs[i].buf; 7441 } 7442 7443 if (dc_submit_i2c( 7444 ddc_service->ctx->dc, 7445 ddc_service->link->link_index, 7446 &cmd)) 7447 result = num; 7448 7449 kfree(cmd.payloads); 7450 return result; 7451 } 7452 7453 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 7454 { 7455 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 7456 } 7457 7458 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 7459 .master_xfer = amdgpu_dm_i2c_xfer, 7460 .functionality = amdgpu_dm_i2c_func, 7461 }; 7462 7463 static struct amdgpu_i2c_adapter * 7464 create_i2c(struct ddc_service *ddc_service, 7465 int link_index, 7466 int *res) 7467 { 7468 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 7469 struct amdgpu_i2c_adapter *i2c; 7470 7471 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 7472 if (!i2c) 7473 return NULL; 7474 i2c->base.owner = THIS_MODULE; 7475 i2c->base.class = I2C_CLASS_DDC; 7476 i2c->base.dev.parent = &adev->pdev->dev; 7477 i2c->base.algo = &amdgpu_dm_i2c_algo; 7478 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 7479 i2c_set_adapdata(&i2c->base, i2c); 7480 i2c->ddc_service = ddc_service; 7481 7482 return i2c; 7483 } 7484 7485 7486 /* 7487 * Note: this function assumes that dc_link_detect() was called for the 7488 * dc_link which will be represented by this aconnector. 
7489 */ 7490 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 7491 struct amdgpu_dm_connector *aconnector, 7492 u32 link_index, 7493 struct amdgpu_encoder *aencoder) 7494 { 7495 int res = 0; 7496 int connector_type; 7497 struct dc *dc = dm->dc; 7498 struct dc_link *link = dc_get_link_at_index(dc, link_index); 7499 struct amdgpu_i2c_adapter *i2c; 7500 7501 link->priv = aconnector; 7502 7503 7504 i2c = create_i2c(link->ddc, link->link_index, &res); 7505 if (!i2c) { 7506 DRM_ERROR("Failed to create i2c adapter data\n"); 7507 return -ENOMEM; 7508 } 7509 7510 aconnector->i2c = i2c; 7511 res = i2c_add_adapter(&i2c->base); 7512 7513 if (res) { 7514 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 7515 goto out_free; 7516 } 7517 7518 connector_type = to_drm_connector_type(link->connector_signal); 7519 7520 res = drm_connector_init_with_ddc( 7521 dm->ddev, 7522 &aconnector->base, 7523 &amdgpu_dm_connector_funcs, 7524 connector_type, 7525 &i2c->base); 7526 7527 if (res) { 7528 DRM_ERROR("connector_init failed\n"); 7529 aconnector->connector_id = -1; 7530 goto out_free; 7531 } 7532 7533 drm_connector_helper_add( 7534 &aconnector->base, 7535 &amdgpu_dm_connector_helper_funcs); 7536 7537 amdgpu_dm_connector_init_helper( 7538 dm, 7539 aconnector, 7540 connector_type, 7541 link, 7542 link_index); 7543 7544 drm_connector_attach_encoder( 7545 &aconnector->base, &aencoder->base); 7546 7547 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 7548 || connector_type == DRM_MODE_CONNECTOR_eDP) 7549 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 7550 7551 out_free: 7552 if (res) { 7553 kfree(i2c); 7554 aconnector->i2c = NULL; 7555 } 7556 return res; 7557 } 7558 7559 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 7560 { 7561 switch (adev->mode_info.num_crtc) { 7562 case 1: 7563 return 0x1; 7564 case 2: 7565 return 0x3; 7566 case 3: 7567 return 0x7; 7568 case 4: 7569 return 0xf; 7570 case 5: 7571 return 0x1f; 7572 case 6: 7573 default: 7574 return 0x3f; 7575 } 7576 } 7577 7578 static int amdgpu_dm_encoder_init(struct drm_device *dev, 7579 struct amdgpu_encoder *aencoder, 7580 uint32_t link_index) 7581 { 7582 struct amdgpu_device *adev = drm_to_adev(dev); 7583 7584 int res = drm_encoder_init(dev, 7585 &aencoder->base, 7586 &amdgpu_dm_encoder_funcs, 7587 DRM_MODE_ENCODER_TMDS, 7588 NULL); 7589 7590 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 7591 7592 if (!res) 7593 aencoder->encoder_id = link_index; 7594 else 7595 aencoder->encoder_id = -1; 7596 7597 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 7598 7599 return res; 7600 } 7601 7602 static void manage_dm_interrupts(struct amdgpu_device *adev, 7603 struct amdgpu_crtc *acrtc, 7604 bool enable) 7605 { 7606 /* 7607 * We have no guarantee that the frontend index maps to the same 7608 * backend index - some even map to more than one. 7609 * 7610 * TODO: Use a different interrupt or check DC itself for the mapping. 
7611 */ 7612 int irq_type = 7613 amdgpu_display_crtc_idx_to_irq_type( 7614 adev, 7615 acrtc->crtc_id); 7616 7617 if (enable) { 7618 drm_crtc_vblank_on(&acrtc->base); 7619 amdgpu_irq_get( 7620 adev, 7621 &adev->pageflip_irq, 7622 irq_type); 7623 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 7624 amdgpu_irq_get( 7625 adev, 7626 &adev->vline0_irq, 7627 irq_type); 7628 #endif 7629 } else { 7630 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 7631 amdgpu_irq_put( 7632 adev, 7633 &adev->vline0_irq, 7634 irq_type); 7635 #endif 7636 amdgpu_irq_put( 7637 adev, 7638 &adev->pageflip_irq, 7639 irq_type); 7640 drm_crtc_vblank_off(&acrtc->base); 7641 } 7642 } 7643 7644 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 7645 struct amdgpu_crtc *acrtc) 7646 { 7647 int irq_type = 7648 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 7649 7650 /** 7651 * This reads the current state for the IRQ and force reapplies 7652 * the setting to hardware. 7653 */ 7654 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 7655 } 7656 7657 static bool 7658 is_scaling_state_different(const struct dm_connector_state *dm_state, 7659 const struct dm_connector_state *old_dm_state) 7660 { 7661 if (dm_state->scaling != old_dm_state->scaling) 7662 return true; 7663 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 7664 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 7665 return true; 7666 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 7667 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 7668 return true; 7669 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 7670 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 7671 return true; 7672 return false; 7673 } 7674 7675 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 7676 struct drm_crtc_state *old_crtc_state, 7677 struct drm_connector_state *new_conn_state, 7678 struct drm_connector_state *old_conn_state, 7679 const struct drm_connector *connector, 7680 struct hdcp_workqueue *hdcp_w) 7681 { 7682 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7683 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 7684 7685 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 7686 connector->index, connector->status, connector->dpms); 7687 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 7688 old_conn_state->content_protection, new_conn_state->content_protection); 7689 7690 if (old_crtc_state) 7691 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7692 old_crtc_state->enable, 7693 old_crtc_state->active, 7694 old_crtc_state->mode_changed, 7695 old_crtc_state->active_changed, 7696 old_crtc_state->connectors_changed); 7697 7698 if (new_crtc_state) 7699 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7700 new_crtc_state->enable, 7701 new_crtc_state->active, 7702 new_crtc_state->mode_changed, 7703 new_crtc_state->active_changed, 7704 new_crtc_state->connectors_changed); 7705 7706 /* hdcp content type change */ 7707 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 7708 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 7709 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7710 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 7711 return true; 7712 } 7713 7714 /* CP is being re 
enabled, ignore this */ 7715 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 7716 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7717 if (new_crtc_state && new_crtc_state->mode_changed) { 7718 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7719 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 7720 return true; 7721 } 7722 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 7723 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 7724 return false; 7725 } 7726 7727 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 7728 * 7729 * Handles: UNDESIRED -> ENABLED 7730 */ 7731 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 7732 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 7733 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7734 7735 /* Stream removed and re-enabled 7736 * 7737 * Can sometimes overlap with the HPD case, 7738 * thus set update_hdcp to false to avoid 7739 * setting HDCP multiple times. 7740 * 7741 * Handles: DESIRED -> DESIRED (Special case) 7742 */ 7743 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 7744 new_conn_state->crtc && new_conn_state->crtc->enabled && 7745 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7746 dm_con_state->update_hdcp = false; 7747 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 7748 __func__); 7749 return true; 7750 } 7751 7752 /* Hot-plug, headless s3, dpms 7753 * 7754 * Only start HDCP if the display is connected/enabled. 7755 * update_hdcp flag will be set to false until the next 7756 * HPD comes in. 
7757 * 7758 * Handles: DESIRED -> DESIRED (Special case) 7759 */ 7760 if (dm_con_state->update_hdcp && 7761 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 7762 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 7763 dm_con_state->update_hdcp = false; 7764 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 7765 __func__); 7766 return true; 7767 } 7768 7769 if (old_conn_state->content_protection == new_conn_state->content_protection) { 7770 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7771 if (new_crtc_state && new_crtc_state->mode_changed) { 7772 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 7773 __func__); 7774 return true; 7775 } 7776 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 7777 __func__); 7778 return false; 7779 } 7780 7781 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 7782 return false; 7783 } 7784 7785 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 7786 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 7787 __func__); 7788 return true; 7789 } 7790 7791 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 7792 return false; 7793 } 7794 7795 static void remove_stream(struct amdgpu_device *adev, 7796 struct amdgpu_crtc *acrtc, 7797 struct dc_stream_state *stream) 7798 { 7799 /* this is the update mode case */ 7800 7801 acrtc->otg_inst = -1; 7802 acrtc->enabled = false; 7803 } 7804 7805 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 7806 { 7807 7808 assert_spin_locked(&acrtc->base.dev->event_lock); 7809 WARN_ON(acrtc->event); 7810 7811 acrtc->event = acrtc->base.state->event; 7812 7813 /* Set the flip status */ 7814 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 7815 7816 /* Mark this event as consumed */ 7817 acrtc->base.state->event = NULL; 7818 7819 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 7820 acrtc->crtc_id); 7821 } 7822 7823 static void update_freesync_state_on_stream( 7824 struct amdgpu_display_manager *dm, 7825 struct dm_crtc_state *new_crtc_state, 7826 struct dc_stream_state *new_stream, 7827 struct dc_plane_state *surface, 7828 u32 flip_timestamp_in_us) 7829 { 7830 struct mod_vrr_params vrr_params; 7831 struct dc_info_packet vrr_infopacket = {0}; 7832 struct amdgpu_device *adev = dm->adev; 7833 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 7834 unsigned long flags; 7835 bool pack_sdp_v1_3 = false; 7836 struct amdgpu_dm_connector *aconn; 7837 enum vrr_packet_type packet_type = PACKET_TYPE_VRR; 7838 7839 if (!new_stream) 7840 return; 7841 7842 /* 7843 * TODO: Determine why min/max totals and vrefresh can be 0 here. 7844 * For now it's sufficient to just guard against these conditions. 7845 */ 7846 7847 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 7848 return; 7849 7850 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 7851 vrr_params = acrtc->dm_irq_params.vrr_params; 7852 7853 if (surface) { 7854 mod_freesync_handle_preflip( 7855 dm->freesync_module, 7856 surface, 7857 new_stream, 7858 flip_timestamp_in_us, 7859 &vrr_params); 7860 7861 if (adev->family < AMDGPU_FAMILY_AI && 7862 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 7863 mod_freesync_handle_v_update(dm->freesync_module, 7864 new_stream, &vrr_params); 7865 7866 /* Need to call this before the frame ends. 
*/ 7867 dc_stream_adjust_vmin_vmax(dm->dc, 7868 new_crtc_state->stream, 7869 &vrr_params.adjust); 7870 } 7871 } 7872 7873 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 7874 7875 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 7876 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 7877 7878 if (aconn->vsdb_info.amd_vsdb_version == 1) 7879 packet_type = PACKET_TYPE_FS_V1; 7880 else if (aconn->vsdb_info.amd_vsdb_version == 2) 7881 packet_type = PACKET_TYPE_FS_V2; 7882 else if (aconn->vsdb_info.amd_vsdb_version == 3) 7883 packet_type = PACKET_TYPE_FS_V3; 7884 7885 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 7886 &new_stream->adaptive_sync_infopacket); 7887 } 7888 7889 mod_freesync_build_vrr_infopacket( 7890 dm->freesync_module, 7891 new_stream, 7892 &vrr_params, 7893 packet_type, 7894 TRANSFER_FUNC_UNKNOWN, 7895 &vrr_infopacket, 7896 pack_sdp_v1_3); 7897 7898 new_crtc_state->freesync_vrr_info_changed |= 7899 (memcmp(&new_crtc_state->vrr_infopacket, 7900 &vrr_infopacket, 7901 sizeof(vrr_infopacket)) != 0); 7902 7903 acrtc->dm_irq_params.vrr_params = vrr_params; 7904 new_crtc_state->vrr_infopacket = vrr_infopacket; 7905 7906 new_stream->vrr_infopacket = vrr_infopacket; 7907 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 7908 7909 if (new_crtc_state->freesync_vrr_info_changed) 7910 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 7911 new_crtc_state->base.crtc->base.id, 7912 (int)new_crtc_state->base.vrr_enabled, 7913 (int)vrr_params.state); 7914 7915 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 7916 } 7917 7918 static void update_stream_irq_parameters( 7919 struct amdgpu_display_manager *dm, 7920 struct dm_crtc_state *new_crtc_state) 7921 { 7922 struct dc_stream_state *new_stream = new_crtc_state->stream; 7923 struct mod_vrr_params vrr_params; 7924 struct mod_freesync_config config = new_crtc_state->freesync_config; 7925 struct amdgpu_device *adev = dm->adev; 7926 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 7927 unsigned long flags; 7928 7929 if (!new_stream) 7930 return; 7931 7932 /* 7933 * TODO: Determine why min/max totals and vrefresh can be 0 here. 7934 * For now it's sufficient to just guard against these conditions. 7935 */ 7936 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 7937 return; 7938 7939 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 7940 vrr_params = acrtc->dm_irq_params.vrr_params; 7941 7942 if (new_crtc_state->vrr_supported && 7943 config.min_refresh_in_uhz && 7944 config.max_refresh_in_uhz) { 7945 /* 7946 * if freesync compatible mode was set, config.state will be set 7947 * in atomic check 7948 */ 7949 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 7950 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 7951 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 7952 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 7953 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 7954 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 7955 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 7956 } else { 7957 config.state = new_crtc_state->base.vrr_enabled ? 
7958 VRR_STATE_ACTIVE_VARIABLE : 7959 VRR_STATE_INACTIVE; 7960 } 7961 } else { 7962 config.state = VRR_STATE_UNSUPPORTED; 7963 } 7964 7965 mod_freesync_build_vrr_params(dm->freesync_module, 7966 new_stream, 7967 &config, &vrr_params); 7968 7969 new_crtc_state->freesync_config = config; 7970 /* Copy state for access from DM IRQ handler */ 7971 acrtc->dm_irq_params.freesync_config = config; 7972 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 7973 acrtc->dm_irq_params.vrr_params = vrr_params; 7974 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 7975 } 7976 7977 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 7978 struct dm_crtc_state *new_state) 7979 { 7980 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 7981 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 7982 7983 if (!old_vrr_active && new_vrr_active) { 7984 /* Transition VRR inactive -> active: 7985 * While VRR is active, we must not disable vblank irq, as a 7986 * reenable after disable would compute bogus vblank/pflip 7987 * timestamps if it likely happened inside display front-porch. 7988 * 7989 * We also need vupdate irq for the actual core vblank handling 7990 * at end of vblank. 7991 */ 7992 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 7993 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 7994 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 7995 __func__, new_state->base.crtc->base.id); 7996 } else if (old_vrr_active && !new_vrr_active) { 7997 /* Transition VRR active -> inactive: 7998 * Allow vblank irq disable again for fixed refresh rate. 7999 */ 8000 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 8001 drm_crtc_vblank_put(new_state->base.crtc); 8002 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8003 __func__, new_state->base.crtc->base.id); 8004 } 8005 } 8006 8007 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8008 { 8009 struct drm_plane *plane; 8010 struct drm_plane_state *old_plane_state; 8011 int i; 8012 8013 /* 8014 * TODO: Make this per-stream so we don't issue redundant updates for 8015 * commits with multiple streams. 8016 */ 8017 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8018 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8019 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 8020 } 8021 8022 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 8023 { 8024 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 8025 8026 return abo->tbo.resource ? 
abo->tbo.resource->mem_type : 0; 8027 } 8028 8029 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8030 struct drm_device *dev, 8031 struct amdgpu_display_manager *dm, 8032 struct drm_crtc *pcrtc, 8033 bool wait_for_vblank) 8034 { 8035 u32 i; 8036 u64 timestamp_ns = ktime_get_ns(); 8037 struct drm_plane *plane; 8038 struct drm_plane_state *old_plane_state, *new_plane_state; 8039 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8040 struct drm_crtc_state *new_pcrtc_state = 8041 drm_atomic_get_new_crtc_state(state, pcrtc); 8042 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8043 struct dm_crtc_state *dm_old_crtc_state = 8044 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8045 int planes_count = 0, vpos, hpos; 8046 unsigned long flags; 8047 u32 target_vblank, last_flip_vblank; 8048 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8049 bool cursor_update = false; 8050 bool pflip_present = false; 8051 bool dirty_rects_changed = false; 8052 struct { 8053 struct dc_surface_update surface_updates[MAX_SURFACES]; 8054 struct dc_plane_info plane_infos[MAX_SURFACES]; 8055 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8056 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8057 struct dc_stream_update stream_update; 8058 } *bundle; 8059 8060 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8061 8062 if (!bundle) { 8063 dm_error("Failed to allocate update bundle\n"); 8064 goto cleanup; 8065 } 8066 8067 /* 8068 * Disable the cursor first if we're disabling all the planes. 8069 * It'll remain on the screen after the planes are re-enabled 8070 * if we don't. 8071 */ 8072 if (acrtc_state->active_planes == 0) 8073 amdgpu_dm_commit_cursors(state); 8074 8075 /* update planes when needed */ 8076 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 8077 struct drm_crtc *crtc = new_plane_state->crtc; 8078 struct drm_crtc_state *new_crtc_state; 8079 struct drm_framebuffer *fb = new_plane_state->fb; 8080 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 8081 bool plane_needs_flip; 8082 struct dc_plane_state *dc_plane; 8083 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 8084 8085 /* Cursor plane is handled after stream updates */ 8086 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 8087 if ((fb && crtc == pcrtc) || 8088 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) 8089 cursor_update = true; 8090 8091 continue; 8092 } 8093 8094 if (!fb || !crtc || pcrtc != crtc) 8095 continue; 8096 8097 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 8098 if (!new_crtc_state->active) 8099 continue; 8100 8101 dc_plane = dm_new_plane_state->dc_state; 8102 if (!dc_plane) 8103 continue; 8104 8105 bundle->surface_updates[planes_count].surface = dc_plane; 8106 if (new_pcrtc_state->color_mgmt_changed) { 8107 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 8108 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 8109 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 8110 } 8111 8112 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 8113 &bundle->scaling_infos[planes_count]); 8114 8115 bundle->surface_updates[planes_count].scaling_info = 8116 &bundle->scaling_infos[planes_count]; 8117 8118 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 8119 8120 pflip_present = pflip_present || plane_needs_flip; 8121 8122 if (!plane_needs_flip) { 8123 
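			/* Not a page flip (either the old or the new state
			 * has no fb): count the plane for the surface/scaling
			 * update and skip the flip address programming below.
			 */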
			planes_count += 1;
			continue;
		}

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
			      new_plane_state->plane->index,
			      bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
			fill_dc_dirty_rects(plane, old_plane_state,
					    new_plane_state, new_crtc_state,
					    &bundle->flip_addrs[planes_count],
					    &dirty_rects_changed);

			/*
			 * If the dirty regions changed, PSR-SU needs to be
			 * disabled temporarily and re-enabled once the dirty
			 * regions are stable, to avoid video glitches.
			 * PSR-SU will be re-enabled in vblank_control_worker()
			 * if the user pauses the video while PSR-SU is
			 * disabled.
			 */
			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
			    acrtc_attach->dm_irq_params.allow_psr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
			    dirty_rects_changed) {
				mutex_lock(&dm->dc_lock);
				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
					timestamp_ns;
				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
					amdgpu_dm_psr_disable(acrtc_state->stream);
				mutex_unlock(&dm->dc_lock);
			}
		}

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change memory domain, FB pitch, DCC state, rotation or
		 * mirroring.
		 *
		 * dm_crtc_helper_atomic_check() only accepts async flips with
		 * fast updates.
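		 *
		 * (Here a "fast" update is, roughly, one that DC has
		 * classified as UPDATE_TYPE_FAST, i.e. not requiring
		 * bandwidth revalidation or full pipe reprogramming; that
		 * classification is what makes flipping outside of vblank
		 * safe. This reading is inferred from the surrounding code.)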
8176 */ 8177 if (crtc->state->async_flip && 8178 (acrtc_state->update_type != UPDATE_TYPE_FAST || 8179 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 8180 drm_warn_once(state->dev, 8181 "[PLANE:%d:%s] async flip with non-fast update\n", 8182 plane->base.id, plane->name); 8183 8184 bundle->flip_addrs[planes_count].flip_immediate = 8185 crtc->state->async_flip && 8186 acrtc_state->update_type == UPDATE_TYPE_FAST && 8187 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 8188 8189 timestamp_ns = ktime_get_ns(); 8190 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 8191 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 8192 bundle->surface_updates[planes_count].surface = dc_plane; 8193 8194 if (!bundle->surface_updates[planes_count].surface) { 8195 DRM_ERROR("No surface for CRTC: id=%d\n", 8196 acrtc_attach->crtc_id); 8197 continue; 8198 } 8199 8200 if (plane == pcrtc->primary) 8201 update_freesync_state_on_stream( 8202 dm, 8203 acrtc_state, 8204 acrtc_state->stream, 8205 dc_plane, 8206 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 8207 8208 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 8209 __func__, 8210 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 8211 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 8212 8213 planes_count += 1; 8214 8215 } 8216 8217 if (pflip_present) { 8218 if (!vrr_active) { 8219 /* Use old throttling in non-vrr fixed refresh rate mode 8220 * to keep flip scheduling based on target vblank counts 8221 * working in a backwards compatible way, e.g., for 8222 * clients using the GLX_OML_sync_control extension or 8223 * DRI3/Present extension with defined target_msc. 8224 */ 8225 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 8226 } else { 8227 /* For variable refresh rate mode only: 8228 * Get vblank of last completed flip to avoid > 1 vrr 8229 * flips per video frame by use of throttling, but allow 8230 * flip programming anywhere in the possibly large 8231 * variable vrr vblank interval for fine-grained flip 8232 * timing control and more opportunity to avoid stutter 8233 * on late submission of flips. 8234 */ 8235 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8236 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 8237 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8238 } 8239 8240 target_vblank = last_flip_vblank + wait_for_vblank; 8241 8242 /* 8243 * Wait until we're out of the vertical blank period before the one 8244 * targeted by the flip 8245 */ 8246 while ((acrtc_attach->enabled && 8247 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 8248 0, &vpos, &hpos, NULL, 8249 NULL, &pcrtc->hwmode) 8250 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 8251 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 8252 (int)(target_vblank - 8253 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 8254 usleep_range(1000, 1100); 8255 } 8256 8257 /** 8258 * Prepare the flip event for the pageflip interrupt to handle. 8259 * 8260 * This only works in the case where we've already turned on the 8261 * appropriate hardware blocks (eg. HUBP) so in the transition case 8262 * from 0 -> n planes we have to skip a hardware generated event 8263 * and rely on sending it from software. 
8264 */ 8265 if (acrtc_attach->base.state->event && 8266 acrtc_state->active_planes > 0) { 8267 drm_crtc_vblank_get(pcrtc); 8268 8269 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8270 8271 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 8272 prepare_flip_isr(acrtc_attach); 8273 8274 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8275 } 8276 8277 if (acrtc_state->stream) { 8278 if (acrtc_state->freesync_vrr_info_changed) 8279 bundle->stream_update.vrr_infopacket = 8280 &acrtc_state->stream->vrr_infopacket; 8281 } 8282 } else if (cursor_update && acrtc_state->active_planes > 0 && 8283 acrtc_attach->base.state->event) { 8284 drm_crtc_vblank_get(pcrtc); 8285 8286 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8287 8288 acrtc_attach->event = acrtc_attach->base.state->event; 8289 acrtc_attach->base.state->event = NULL; 8290 8291 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8292 } 8293 8294 /* Update the planes if changed or disable if we don't have any. */ 8295 if ((planes_count || acrtc_state->active_planes == 0) && 8296 acrtc_state->stream) { 8297 /* 8298 * If PSR or idle optimizations are enabled then flush out 8299 * any pending work before hardware programming. 8300 */ 8301 if (dm->vblank_control_workqueue) 8302 flush_workqueue(dm->vblank_control_workqueue); 8303 8304 bundle->stream_update.stream = acrtc_state->stream; 8305 if (new_pcrtc_state->mode_changed) { 8306 bundle->stream_update.src = acrtc_state->stream->src; 8307 bundle->stream_update.dst = acrtc_state->stream->dst; 8308 } 8309 8310 if (new_pcrtc_state->color_mgmt_changed) { 8311 /* 8312 * TODO: This isn't fully correct since we've actually 8313 * already modified the stream in place. 8314 */ 8315 bundle->stream_update.gamut_remap = 8316 &acrtc_state->stream->gamut_remap_matrix; 8317 bundle->stream_update.output_csc_transform = 8318 &acrtc_state->stream->csc_color_matrix; 8319 bundle->stream_update.out_transfer_func = 8320 acrtc_state->stream->out_transfer_func; 8321 } 8322 8323 acrtc_state->stream->abm_level = acrtc_state->abm_level; 8324 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 8325 bundle->stream_update.abm_level = &acrtc_state->abm_level; 8326 8327 mutex_lock(&dm->dc_lock); 8328 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8329 acrtc_state->stream->link->psr_settings.psr_allow_active) 8330 amdgpu_dm_psr_disable(acrtc_state->stream); 8331 mutex_unlock(&dm->dc_lock); 8332 8333 /* 8334 * If FreeSync state on the stream has changed then we need to 8335 * re-adjust the min/max bounds now that DC doesn't handle this 8336 * as part of commit. 8337 */ 8338 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 8339 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8340 dc_stream_adjust_vmin_vmax( 8341 dm->dc, acrtc_state->stream, 8342 &acrtc_attach->dm_irq_params.vrr_params.adjust); 8343 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8344 } 8345 mutex_lock(&dm->dc_lock); 8346 update_planes_and_stream_adapter(dm->dc, 8347 acrtc_state->update_type, 8348 planes_count, 8349 acrtc_state->stream, 8350 &bundle->stream_update, 8351 bundle->surface_updates); 8352 8353 /** 8354 * Enable or disable the interrupts on the backend. 8355 * 8356 * Most pipes are put into power gating when unused. 8357 * 8358 * When power gating is enabled on a pipe we lose the 8359 * interrupt enablement state when power gating is disabled. 
8360 * 8361 * So we need to update the IRQ control state in hardware 8362 * whenever the pipe turns on (since it could be previously 8363 * power gated) or off (since some pipes can't be power gated 8364 * on some ASICs). 8365 */ 8366 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 8367 dm_update_pflip_irq_state(drm_to_adev(dev), 8368 acrtc_attach); 8369 8370 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8371 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 8372 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 8373 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8374 8375 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 8376 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8377 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 8378 struct amdgpu_dm_connector *aconn = 8379 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8380 8381 if (aconn->psr_skip_count > 0) 8382 aconn->psr_skip_count--; 8383 8384 /* Allow PSR when skip count is 0. */ 8385 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 8386 8387 /* 8388 * If sink supports PSR SU, there is no need to rely on 8389 * a vblank event disable request to enable PSR. PSR SU 8390 * can be enabled immediately once OS demonstrates an 8391 * adequate number of fast atomic commits to notify KMD 8392 * of update events. See `vblank_control_worker()`. 8393 */ 8394 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 8395 acrtc_attach->dm_irq_params.allow_psr_entry && 8396 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8397 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8398 #endif 8399 !acrtc_state->stream->link->psr_settings.psr_allow_active && 8400 (timestamp_ns - 8401 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > 8402 500000000) 8403 amdgpu_dm_psr_enable(acrtc_state->stream); 8404 } else { 8405 acrtc_attach->dm_irq_params.allow_psr_entry = false; 8406 } 8407 8408 mutex_unlock(&dm->dc_lock); 8409 } 8410 8411 /* 8412 * Update cursor state *after* programming all the planes. 8413 * This avoids redundant programming in the case where we're going 8414 * to be disabling a single plane - those pipes are being disabled. 8415 */ 8416 if (acrtc_state->active_planes) 8417 amdgpu_dm_commit_cursors(state); 8418 8419 cleanup: 8420 kfree(bundle); 8421 } 8422 8423 static void amdgpu_dm_commit_audio(struct drm_device *dev, 8424 struct drm_atomic_state *state) 8425 { 8426 struct amdgpu_device *adev = drm_to_adev(dev); 8427 struct amdgpu_dm_connector *aconnector; 8428 struct drm_connector *connector; 8429 struct drm_connector_state *old_con_state, *new_con_state; 8430 struct drm_crtc_state *new_crtc_state; 8431 struct dm_crtc_state *new_dm_crtc_state; 8432 const struct dc_stream_status *status; 8433 int i, inst; 8434 8435 /* Notify device removals. */ 8436 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8437 if (old_con_state->crtc != new_con_state->crtc) { 8438 /* CRTC changes require notification. 
*/ 8439 goto notify; 8440 } 8441 8442 if (!new_con_state->crtc) 8443 continue; 8444 8445 new_crtc_state = drm_atomic_get_new_crtc_state( 8446 state, new_con_state->crtc); 8447 8448 if (!new_crtc_state) 8449 continue; 8450 8451 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8452 continue; 8453 8454 notify: 8455 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 8456 continue; 8457 8458 aconnector = to_amdgpu_dm_connector(connector); 8459 8460 mutex_lock(&adev->dm.audio_lock); 8461 inst = aconnector->audio_inst; 8462 aconnector->audio_inst = -1; 8463 mutex_unlock(&adev->dm.audio_lock); 8464 8465 amdgpu_dm_audio_eld_notify(adev, inst); 8466 } 8467 8468 /* Notify audio device additions. */ 8469 for_each_new_connector_in_state(state, connector, new_con_state, i) { 8470 if (!new_con_state->crtc) 8471 continue; 8472 8473 new_crtc_state = drm_atomic_get_new_crtc_state( 8474 state, new_con_state->crtc); 8475 8476 if (!new_crtc_state) 8477 continue; 8478 8479 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8480 continue; 8481 8482 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 8483 if (!new_dm_crtc_state->stream) 8484 continue; 8485 8486 status = dc_stream_get_status(new_dm_crtc_state->stream); 8487 if (!status) 8488 continue; 8489 8490 aconnector = to_amdgpu_dm_connector(connector); 8491 8492 mutex_lock(&adev->dm.audio_lock); 8493 inst = status->audio_inst; 8494 aconnector->audio_inst = inst; 8495 mutex_unlock(&adev->dm.audio_lock); 8496 8497 amdgpu_dm_audio_eld_notify(adev, inst); 8498 } 8499 } 8500 8501 /* 8502 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 8503 * @crtc_state: the DRM CRTC state 8504 * @stream_state: the DC stream state. 8505 * 8506 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 8507 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
8508 */ 8509 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 8510 struct dc_stream_state *stream_state) 8511 { 8512 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 8513 } 8514 8515 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 8516 struct dc_state *dc_state) 8517 { 8518 struct drm_device *dev = state->dev; 8519 struct amdgpu_device *adev = drm_to_adev(dev); 8520 struct amdgpu_display_manager *dm = &adev->dm; 8521 struct drm_crtc *crtc; 8522 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8523 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 8524 bool mode_set_reset_required = false; 8525 u32 i; 8526 8527 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 8528 new_crtc_state, i) { 8529 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8530 8531 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8532 8533 if (old_crtc_state->active && 8534 (!new_crtc_state->active || 8535 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 8536 manage_dm_interrupts(adev, acrtc, false); 8537 dc_stream_release(dm_old_crtc_state->stream); 8538 } 8539 } 8540 8541 drm_atomic_helper_calc_timestamping_constants(state); 8542 8543 /* update changed items */ 8544 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8545 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8546 8547 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8548 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8549 8550 drm_dbg_state(state->dev, 8551 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 8552 acrtc->crtc_id, 8553 new_crtc_state->enable, 8554 new_crtc_state->active, 8555 new_crtc_state->planes_changed, 8556 new_crtc_state->mode_changed, 8557 new_crtc_state->active_changed, 8558 new_crtc_state->connectors_changed); 8559 8560 /* Disable cursor if disabling crtc */ 8561 if (old_crtc_state->active && !new_crtc_state->active) { 8562 struct dc_cursor_position position; 8563 8564 memset(&position, 0, sizeof(position)); 8565 mutex_lock(&dm->dc_lock); 8566 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); 8567 mutex_unlock(&dm->dc_lock); 8568 } 8569 8570 /* Copy all transient state flags into dc state */ 8571 if (dm_new_crtc_state->stream) { 8572 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 8573 dm_new_crtc_state->stream); 8574 } 8575 8576 /* handles headless hotplug case, updating new_state and 8577 * aconnector as needed 8578 */ 8579 8580 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 8581 8582 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 8583 8584 if (!dm_new_crtc_state->stream) { 8585 /* 8586 * this could happen because of issues with 8587 * userspace notifications delivery. 8588 * In this case userspace tries to set mode on 8589 * display which is disconnected in fact. 8590 * dc_sink is NULL in this case on aconnector. 8591 * We expect reset mode will come soon. 
8592 * 8593 * This can also happen when unplug is done 8594 * during resume sequence ended 8595 * 8596 * In this case, we want to pretend we still 8597 * have a sink to keep the pipe running so that 8598 * hw state is consistent with the sw state 8599 */ 8600 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 8601 __func__, acrtc->base.base.id); 8602 continue; 8603 } 8604 8605 if (dm_old_crtc_state->stream) 8606 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 8607 8608 pm_runtime_get_noresume(dev->dev); 8609 8610 acrtc->enabled = true; 8611 acrtc->hw_mode = new_crtc_state->mode; 8612 crtc->hwmode = new_crtc_state->mode; 8613 mode_set_reset_required = true; 8614 } else if (modereset_required(new_crtc_state)) { 8615 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); 8616 /* i.e. reset mode */ 8617 if (dm_old_crtc_state->stream) 8618 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 8619 8620 mode_set_reset_required = true; 8621 } 8622 } /* for_each_crtc_in_state() */ 8623 8624 /* if there mode set or reset, disable eDP PSR */ 8625 if (mode_set_reset_required) { 8626 if (dm->vblank_control_workqueue) 8627 flush_workqueue(dm->vblank_control_workqueue); 8628 8629 amdgpu_dm_psr_disable_all(dm); 8630 } 8631 8632 dm_enable_per_frame_crtc_master_sync(dc_state); 8633 mutex_lock(&dm->dc_lock); 8634 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); 8635 8636 /* Allow idle optimization when vblank count is 0 for display off */ 8637 if (dm->active_vblank_irq_count == 0) 8638 dc_allow_idle_optimizations(dm->dc, true); 8639 mutex_unlock(&dm->dc_lock); 8640 8641 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 8642 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8643 8644 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8645 8646 if (dm_new_crtc_state->stream != NULL) { 8647 const struct dc_stream_status *status = 8648 dc_stream_get_status(dm_new_crtc_state->stream); 8649 8650 if (!status) 8651 status = dc_stream_get_status_from_state(dc_state, 8652 dm_new_crtc_state->stream); 8653 if (!status) 8654 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 8655 else 8656 acrtc->otg_inst = status->primary_otg_inst; 8657 } 8658 } 8659 } 8660 8661 /** 8662 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 8663 * @state: The atomic state to commit 8664 * 8665 * This will tell DC to commit the constructed DC state from atomic_check, 8666 * programming the hardware. Any failures here implies a hardware failure, since 8667 * atomic check should have filtered anything non-kosher. 
8668 */ 8669 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 8670 { 8671 struct drm_device *dev = state->dev; 8672 struct amdgpu_device *adev = drm_to_adev(dev); 8673 struct amdgpu_display_manager *dm = &adev->dm; 8674 struct dm_atomic_state *dm_state; 8675 struct dc_state *dc_state = NULL; 8676 u32 i, j; 8677 struct drm_crtc *crtc; 8678 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8679 unsigned long flags; 8680 bool wait_for_vblank = true; 8681 struct drm_connector *connector; 8682 struct drm_connector_state *old_con_state, *new_con_state; 8683 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 8684 int crtc_disable_count = 0; 8685 8686 trace_amdgpu_dm_atomic_commit_tail_begin(state); 8687 8688 drm_atomic_helper_update_legacy_modeset_state(dev, state); 8689 drm_dp_mst_atomic_wait_for_dependencies(state); 8690 8691 dm_state = dm_atomic_get_new_state(state); 8692 if (dm_state && dm_state->context) { 8693 dc_state = dm_state->context; 8694 amdgpu_dm_commit_streams(state, dc_state); 8695 } 8696 8697 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8698 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8699 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8700 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8701 8702 if (!adev->dm.hdcp_workqueue) 8703 continue; 8704 8705 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 8706 8707 if (!connector) 8708 continue; 8709 8710 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 8711 connector->index, connector->status, connector->dpms); 8712 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 8713 old_con_state->content_protection, new_con_state->content_protection); 8714 8715 if (aconnector->dc_sink) { 8716 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 8717 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 8718 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 8719 aconnector->dc_sink->edid_caps.display_name); 8720 } 8721 } 8722 8723 new_crtc_state = NULL; 8724 old_crtc_state = NULL; 8725 8726 if (acrtc) { 8727 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8728 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8729 } 8730 8731 if (old_crtc_state) 8732 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8733 old_crtc_state->enable, 8734 old_crtc_state->active, 8735 old_crtc_state->mode_changed, 8736 old_crtc_state->active_changed, 8737 old_crtc_state->connectors_changed); 8738 8739 if (new_crtc_state) 8740 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8741 new_crtc_state->enable, 8742 new_crtc_state->active, 8743 new_crtc_state->mode_changed, 8744 new_crtc_state->active_changed, 8745 new_crtc_state->connectors_changed); 8746 } 8747 8748 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8749 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8750 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8751 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8752 8753 if (!adev->dm.hdcp_workqueue) 8754 continue; 8755 8756 new_crtc_state = NULL; 8757 old_crtc_state = NULL; 8758 8759 if (acrtc) { 8760 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8761 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8762 } 8763 
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
			dm_new_con_state->update_hdcp = true;
			continue;
		}

		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
						    old_con_state, connector, adev->dm.hdcp_workqueue)) {
			/* When a display is unplugged from an MST hub, the
			 * connector will be destroyed within
			 * dm_dp_mst_connector_destroy, and its hdcp
			 * properties (type, undesired, desired, enabled)
			 * will be lost. So, save the hdcp properties into
			 * hdcp_work within amdgpu_dm_atomic_commit_tail.
			 * If the same display is plugged back in with the
			 * same display index, its hdcp properties will be
			 * retrieved from hdcp_work within
			 * dm_dp_mst_get_modes.
			 */

			bool enable_encryption = false;

			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
				enable_encryption = true;

			if (aconnector->dc_link && aconnector->dc_sink &&
			    aconnector->dc_link->type == dc_connection_mst_branch) {
				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
				struct hdcp_workqueue *hdcp_w =
					&hdcp_work[aconnector->dc_link->link_index];

				hdcp_w->hdcp_content_type[connector->index] =
					new_con_state->hdcp_content_type;
				hdcp_w->content_protection[connector->index] =
					new_con_state->content_protection;
			}

			if (new_crtc_state && new_crtc_state->mode_changed &&
			    new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
				enable_encryption = true;

			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);

			hdcp_update_display(
				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
				new_con_state->hdcp_content_type, enable_encryption);
		}
	}

	/* Handle connector state changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct dc_surface_update *dummy_updates;
		struct dc_stream_update stream_update;
		struct dc_info_packet hdr_packet;
		struct dc_stream_status *status = NULL;
		bool abm_changed, hdr_changed, scaling_changed;

		memset(&stream_update, 0, sizeof(stream_update));

		if (acrtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
		}

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		scaling_changed = is_scaling_state_different(dm_new_con_state,
							     dm_old_con_state);

		abm_changed = dm_new_crtc_state->abm_level !=
			      dm_old_crtc_state->abm_level;

		hdr_changed =
			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);

		if (!scaling_changed && !abm_changed && !hdr_changed)
			continue;

		stream_update.stream = dm_new_crtc_state->stream;
		if (scaling_changed) {
			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
					dm_new_con_state, dm_new_crtc_state->stream);

			stream_update.src = dm_new_crtc_state->stream->src;
			stream_update.dst = dm_new_crtc_state->stream->dst;
		}

		if (abm_changed) {
			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;

			stream_update.abm_level = &dm_new_crtc_state->abm_level;
		}

		if (hdr_changed) {
			fill_hdr_info_packet(new_con_state, &hdr_packet);
			stream_update.hdr_static_metadata = &hdr_packet;
		}

		status = dc_stream_get_status(dm_new_crtc_state->stream);

		if (WARN_ON(!status))
			continue;

		WARN_ON(!status->plane_count);

		/*
		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
		 * Here we create an empty update on each plane.
		 * To fix this, DC should permit updating only stream properties.
		 */
		dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
		if (!dummy_updates) {
			DRM_ERROR("Failed to allocate memory for dummy_updates\n");
			continue;
		}
		for (j = 0; j < status->plane_count; j++)
			dummy_updates[j].surface = status->plane_states[0];

		mutex_lock(&dm->dc_lock);
		dc_update_planes_and_stream(dm->dc,
					    dummy_updates,
					    status->plane_count,
					    dm_new_crtc_state->stream,
					    &stream_update);
		mutex_unlock(&dm->dc_lock);
		kfree(dummy_updates);
	}

	/*
	 * Enable interrupts for CRTCs that are newly enabled or went through
	 * a modeset. This is intentionally deferred until after the front end
	 * state has been modified, so that the OTG is on by the time the IRQ
	 * handlers run and they do not access stale or invalid state.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
		enum amdgpu_dm_pipe_crc_source cur_crc_src;
#endif
		/* Count number of newly disabled CRTCs for dropping PM refs later. */
		if (old_crtc_state->active && !new_crtc_state->active)
			crtc_disable_count++;

		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* For freesync config update on crtc state and params for irq */
		update_stream_irq_parameters(dm, dm_new_crtc_state);

#ifdef CONFIG_DEBUG_FS
		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
		cur_crc_src = acrtc->dm_irq_params.crc_src;
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
#endif

		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			dc_stream_retain(dm_new_crtc_state->stream);
			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
			manage_dm_interrupts(adev, acrtc, true);
		}
		/* Handle vrr on->off / off->on transitions */
		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);

#ifdef CONFIG_DEBUG_FS
		if (new_crtc_state->active &&
		    (!old_crtc_state->active ||
		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
			/*
			 * Frontend may have changed, so reapply the CRC capture
			 * settings for the stream.
8942 */ 8943 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 8944 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8945 if (amdgpu_dm_crc_window_is_activated(crtc)) { 8946 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8947 acrtc->dm_irq_params.window_param.update_win = true; 8948 8949 /** 8950 * It takes 2 frames for HW to stably generate CRC when 8951 * resuming from suspend, so we set skip_frame_cnt 2. 8952 */ 8953 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; 8954 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8955 } 8956 #endif 8957 if (amdgpu_dm_crtc_configure_crc_source( 8958 crtc, dm_new_crtc_state, cur_crc_src)) 8959 DRM_DEBUG_DRIVER("Failed to configure crc source"); 8960 } 8961 } 8962 #endif 8963 } 8964 8965 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 8966 if (new_crtc_state->async_flip) 8967 wait_for_vblank = false; 8968 8969 /* update planes when needed per crtc*/ 8970 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 8971 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8972 8973 if (dm_new_crtc_state->stream) 8974 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); 8975 } 8976 8977 /* Update audio instances for each connector. */ 8978 amdgpu_dm_commit_audio(dev, state); 8979 8980 /* restore the backlight level */ 8981 for (i = 0; i < dm->num_of_edps; i++) { 8982 if (dm->backlight_dev[i] && 8983 (dm->actual_brightness[i] != dm->brightness[i])) 8984 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 8985 } 8986 8987 /* 8988 * send vblank event on all events not handled in flip and 8989 * mark consumed event for drm_atomic_helper_commit_hw_done 8990 */ 8991 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8992 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 8993 8994 if (new_crtc_state->event) 8995 drm_send_event_locked(dev, &new_crtc_state->event->base); 8996 8997 new_crtc_state->event = NULL; 8998 } 8999 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9000 9001 /* Signal HW programming completion */ 9002 drm_atomic_helper_commit_hw_done(state); 9003 9004 if (wait_for_vblank) 9005 drm_atomic_helper_wait_for_flip_done(dev, state); 9006 9007 drm_atomic_helper_cleanup_planes(dev, state); 9008 9009 /* Don't free the memory if we are hitting this as part of suspend. 9010 * This way we don't free any memory during suspend; see 9011 * amdgpu_bo_free_kernel(). The memory will be freed in the first 9012 * non-suspend modeset or when the driver is torn down. 
9013 */ 9014 if (!adev->in_suspend) { 9015 /* return the stolen vga memory back to VRAM */ 9016 if (!adev->mman.keep_stolen_vga_memory) 9017 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 9018 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 9019 } 9020 9021 /* 9022 * Finally, drop a runtime PM reference for each newly disabled CRTC, 9023 * so we can put the GPU into runtime suspend if we're not driving any 9024 * displays anymore 9025 */ 9026 for (i = 0; i < crtc_disable_count; i++) 9027 pm_runtime_put_autosuspend(dev->dev); 9028 pm_runtime_mark_last_busy(dev->dev); 9029 } 9030 9031 static int dm_force_atomic_commit(struct drm_connector *connector) 9032 { 9033 int ret = 0; 9034 struct drm_device *ddev = connector->dev; 9035 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 9036 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 9037 struct drm_plane *plane = disconnected_acrtc->base.primary; 9038 struct drm_connector_state *conn_state; 9039 struct drm_crtc_state *crtc_state; 9040 struct drm_plane_state *plane_state; 9041 9042 if (!state) 9043 return -ENOMEM; 9044 9045 state->acquire_ctx = ddev->mode_config.acquire_ctx; 9046 9047 /* Construct an atomic state to restore previous display setting */ 9048 9049 /* 9050 * Attach connectors to drm_atomic_state 9051 */ 9052 conn_state = drm_atomic_get_connector_state(state, connector); 9053 9054 ret = PTR_ERR_OR_ZERO(conn_state); 9055 if (ret) 9056 goto out; 9057 9058 /* Attach crtc to drm_atomic_state*/ 9059 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 9060 9061 ret = PTR_ERR_OR_ZERO(crtc_state); 9062 if (ret) 9063 goto out; 9064 9065 /* force a restore */ 9066 crtc_state->mode_changed = true; 9067 9068 /* Attach plane to drm_atomic_state */ 9069 plane_state = drm_atomic_get_plane_state(state, plane); 9070 9071 ret = PTR_ERR_OR_ZERO(plane_state); 9072 if (ret) 9073 goto out; 9074 9075 /* Call commit internally with the state we just constructed */ 9076 ret = drm_atomic_commit(state); 9077 9078 out: 9079 drm_atomic_state_put(state); 9080 if (ret) 9081 DRM_ERROR("Restoring old state failed with %i\n", ret); 9082 9083 return ret; 9084 } 9085 9086 /* 9087 * This function handles all cases when set mode does not come upon hotplug. 
9088 * This includes when a display is unplugged then plugged back into the 9089 * same port and when running without usermode desktop manager supprot 9090 */ 9091 void dm_restore_drm_connector_state(struct drm_device *dev, 9092 struct drm_connector *connector) 9093 { 9094 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9095 struct amdgpu_crtc *disconnected_acrtc; 9096 struct dm_crtc_state *acrtc_state; 9097 9098 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 9099 return; 9100 9101 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 9102 if (!disconnected_acrtc) 9103 return; 9104 9105 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 9106 if (!acrtc_state->stream) 9107 return; 9108 9109 /* 9110 * If the previous sink is not released and different from the current, 9111 * we deduce we are in a state where we can not rely on usermode call 9112 * to turn on the display, so we do it here 9113 */ 9114 if (acrtc_state->stream->sink != aconnector->dc_sink) 9115 dm_force_atomic_commit(&aconnector->base); 9116 } 9117 9118 /* 9119 * Grabs all modesetting locks to serialize against any blocking commits, 9120 * Waits for completion of all non blocking commits. 9121 */ 9122 static int do_aquire_global_lock(struct drm_device *dev, 9123 struct drm_atomic_state *state) 9124 { 9125 struct drm_crtc *crtc; 9126 struct drm_crtc_commit *commit; 9127 long ret; 9128 9129 /* 9130 * Adding all modeset locks to aquire_ctx will 9131 * ensure that when the framework release it the 9132 * extra locks we are locking here will get released to 9133 */ 9134 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 9135 if (ret) 9136 return ret; 9137 9138 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 9139 spin_lock(&crtc->commit_lock); 9140 commit = list_first_entry_or_null(&crtc->commit_list, 9141 struct drm_crtc_commit, commit_entry); 9142 if (commit) 9143 drm_crtc_commit_get(commit); 9144 spin_unlock(&crtc->commit_lock); 9145 9146 if (!commit) 9147 continue; 9148 9149 /* 9150 * Make sure all pending HW programming completed and 9151 * page flips done 9152 */ 9153 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 9154 9155 if (ret > 0) 9156 ret = wait_for_completion_interruptible_timeout( 9157 &commit->flip_done, 10*HZ); 9158 9159 if (ret == 0) 9160 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n", 9161 crtc->base.id, crtc->name); 9162 9163 drm_crtc_commit_put(commit); 9164 } 9165 9166 return ret < 0 ? 
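/*
 * For reference, the return mapping used above: a sketch of how the
 * wait_for_completion_interruptible_timeout() results are folded into the
 * function's return value ('done' is a hypothetical completion):
 *
 *	long ret = wait_for_completion_interruptible_timeout(&done, 10*HZ);
 *
 *	ret < 0:  interrupted by a signal (e.g. -ERESTARTSYS), propagated
 *	          by the final 'return ret < 0 ? ret : 0;'
 *	ret == 0: timed out; only reported via DRM_ERROR above
 *	ret > 0:  completed with 'ret' jiffies to spare, mapped to 0
 */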
static void get_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state,
	struct dm_connector_state *new_con_state)
{
	struct mod_freesync_config config = {0};
	struct amdgpu_dm_connector *aconnector =
			to_amdgpu_dm_connector(new_con_state->base.connector);
	struct drm_display_mode *mode = &new_crtc_state->base.mode;
	int vrefresh = drm_mode_vrefresh(mode);
	bool fs_vid_mode = false;

	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
					vrefresh >= aconnector->min_vfreq &&
					vrefresh <= aconnector->max_vfreq;

	if (new_crtc_state->vrr_supported) {
		new_crtc_state->stream->ignore_msa_timing_param = true;
		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;

		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
		config.vsif_supported = true;
		config.btr = true;

		if (fs_vid_mode) {
			config.state = VRR_STATE_ACTIVE_FIXED;
			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
			goto out;
		} else if (new_crtc_state->base.vrr_enabled) {
			config.state = VRR_STATE_ACTIVE_VARIABLE;
		} else {
			config.state = VRR_STATE_INACTIVE;
		}
	}
out:
	new_crtc_state->freesync_config = config;
}

static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state)
{
	const struct drm_display_mode *old_mode, *new_mode;

	if (!old_crtc_state || !new_crtc_state)
		return false;

	old_mode = &old_crtc_state->mode;
	new_mode = &new_crtc_state->mode;

	/*
	 * The mixed ==/!= comparisons below are deliberate: the timing counts
	 * as "unchanged" for freesync when only the vertical front porch
	 * differs, i.e. vtotal and the vsync position move while the vsync
	 * width and all horizontal parameters stay the same.
	 */
	if (old_mode->clock == new_mode->clock &&
	    old_mode->hdisplay == new_mode->hdisplay &&
	    old_mode->vdisplay == new_mode->vdisplay &&
	    old_mode->htotal == new_mode->htotal &&
	    old_mode->vtotal != new_mode->vtotal &&
	    old_mode->hsync_start == new_mode->hsync_start &&
	    old_mode->vsync_start != new_mode->vsync_start &&
	    old_mode->hsync_end == new_mode->hsync_end &&
	    old_mode->vsync_end != new_mode->vsync_end &&
	    old_mode->hskew == new_mode->hskew &&
	    old_mode->vscan == new_mode->vscan &&
	    (old_mode->vsync_end - old_mode->vsync_start) ==
	    (new_mode->vsync_end - new_mode->vsync_start))
		return true;

	return false;
}

static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
{
	u64 num, den, res;
	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;

	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;

	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
	den = (unsigned long long)new_crtc_state->mode.htotal *
	      (unsigned long long)new_crtc_state->mode.vtotal;

	res = div_u64(num, den);
	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
}
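/*
 * A worked example of the fixed-refresh math above (mode values assumed for
 * illustration): a 1920x1080 mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125 gives
 *
 *	num = 148500 * 1000 * 1000000 = 1.485e14
 *	den = 2200 * 1125             = 2475000
 *	div_u64(num, den)             = 60000000
 *
 * i.e. 60 Hz expressed in micro-Hertz, the same uHz unit used for
 * config.min/max_refresh_in_uhz in get_freesync_config_for_crtc().
 */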
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
	 * dc_validation_set, and update changed items there.
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO: This hack should go away */
	if (aconnector && enable) {
		/* Make sure the fake sink is created in the plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
									&aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
									&aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * We can have no stream on ACTION_SET if a display
		 * was disconnected during S3; in this case it is not an
		 * error, the OS will be updated after detection and
		 * will do the right thing on the next atomic commit.
		 */

		if (!new_stream) {
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					 __func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged;
		 * we would hit the BUG_ON below and a black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
			goto skip_modeset;

		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
					 new_crtc_state->mode_changed);
		}
	}

	/* The mode_changed flag may get updated above, so check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	drm_dbg_state(state->dev,
		      "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
		      acrtc->crtc_id,
		      new_crtc_state->enable,
		      new_crtc_state->active,
		      new_crtc_state->planes_changed,
		      new_crtc_state->mode_changed,
		      new_crtc_state->active_changed,
		      new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		/* Unset freesync video if it was active before */
		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
		}

		/* Now check if we should set freesync video mode */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    is_timing_unchanged_for_freesync(new_crtc_state,
						     old_crtc_state)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER(
				"Mode change not required for front porch change, setting mode_changed to %d\n",
				new_crtc_state->mode_changed);

			set_freesync_fixed_config(dm_new_crtc_state);

			goto skip_modeset;
		} else if (aconnector &&
			   is_freesync_video_mode(&new_crtc_state->mode,
						  aconnector)) {
			struct drm_display_mode *high_mode;

			high_mode = get_highest_refresh_rate_mode(aconnector, false);
			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
				set_freesync_fixed_config(dm_new_crtc_state);
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				 crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		*lock_and_validation_needed = true;

	} else { /* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent a NULL pointer on new_stream when MST
		 * connectors are added but not found in the existing
		 * crtc_state in chained mode.
		 * TODO: dig out the root cause of this.
		 */
		if (!aconnector)
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
						    dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			dc_stream_retain(new_stream);

			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
					 crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			*lock_and_validation_needed = true;
		}
	}
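	/*
	 * To recap the stream lifecycle handled above (a sketch, with error
	 * handling elided): on enable a validated stream is created and
	 * handed to the DC context with an extra reference held by the CRTC
	 * state; on disable it is removed from the context and released:
	 *
	 *	new_stream = create_validate_stream_for_sink(...);
	 *	dm_new_crtc_state->stream = new_stream;
	 *	dc_stream_retain(new_stream);
	 *	dc_add_stream_to_ctx(dm->dc, dm_state->context, new_stream);
	 *	...
	 *	dc_remove_stream_from_ctx(dm->dc, dm_state->context, old_stream);
	 *	dc_stream_release(old_stream);
	 */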
skip_modeset:
	/* Release the extra reference */
	if (new_stream)
		dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given the above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}

static bool should_reset_plane(struct drm_atomic_state *state,
			       struct drm_plane *plane,
			       struct drm_plane_state *old_plane_state,
			       struct drm_plane_state *new_plane_state)
{
	struct drm_plane *other;
	struct drm_plane_state *old_other_state, *new_other_state;
	struct drm_crtc_state *new_crtc_state;
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	int i;

	/*
	 * TODO: Remove this hack for all ASICs once it is proven that fast
	 * updates work fine on DCN 3.2+.
	 */
	if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
		return true;

	/* Exit early if we know that we're adding or removing the plane. */
	if (old_plane_state->crtc != new_plane_state->crtc)
		return true;

	/* old_crtc == new_crtc == NULL, the plane is not in any context. */
	if (!new_plane_state->crtc)
		return false;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);

	if (!new_crtc_state)
		return true;

	/* CRTC degamma changes currently require us to recreate planes. */
	if (new_crtc_state->color_mgmt_changed)
		return true;

	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * If there are any new primary or overlay planes being added or
	 * removed then the z-order can potentially change. To ensure
	 * correct z-order and pipe acquisition the current DC architecture
	 * requires us to remove and recreate all existing planes.
	 *
	 * TODO: Come up with a more elegant solution for this.
	 */
	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
		struct amdgpu_framebuffer *old_afb, *new_afb;

		if (other->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (old_other_state->crtc != new_plane_state->crtc &&
		    new_other_state->crtc != new_plane_state->crtc)
			continue;

		if (old_other_state->crtc != new_other_state->crtc)
			return true;

		/* Src/dst size and scaling updates. */
		if (old_other_state->src_w != new_other_state->src_w ||
		    old_other_state->src_h != new_other_state->src_h ||
		    old_other_state->crtc_w != new_other_state->crtc_w ||
		    old_other_state->crtc_h != new_other_state->crtc_h)
			return true;

		/* Rotation / mirroring updates. */
		if (old_other_state->rotation != new_other_state->rotation)
			return true;

		/* Blending updates. */
		if (old_other_state->pixel_blend_mode !=
		    new_other_state->pixel_blend_mode)
			return true;

		/* Alpha updates. */
		if (old_other_state->alpha != new_other_state->alpha)
			return true;

		/* Colorspace changes. */
		if (old_other_state->color_range != new_other_state->color_range ||
		    old_other_state->color_encoding != new_other_state->color_encoding)
			return true;

		/* Framebuffer checks fall at the end. */
		if (!old_other_state->fb || !new_other_state->fb)
			continue;

		/* Pixel format changes can require bandwidth updates. */
		if (old_other_state->fb->format != new_other_state->fb->format)
			return true;

		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;

		/* Tiling and DCC changes also require bandwidth updates. */
		if (old_afb->tiling_flags != new_afb->tiling_flags ||
		    old_afb->base.modifier != new_afb->base.modifier)
			return true;
	}

	return false;
}
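/*
 * For example (a hypothetical commit): enabling a new overlay plane on a
 * CRTC that already drives a primary plane can change z-order and pipe
 * assignment, so for the overlay the loop above sees
 *
 *	old_other_state->crtc == NULL, new_other_state->crtc == crtc
 *
 * and returns true for the existing primary as well; both planes are then
 * removed from and re-added to the DC context. A commit that only moves a
 * plane (same size, rotation, blending, alpha, colorspace and FB format)
 * falls through the loop and stays a fast update.
 */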
static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
			      struct drm_plane_state *new_plane_state,
			      struct drm_framebuffer *fb)
{
	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
	unsigned int pitch;
	bool linear;

	if (fb->width > new_acrtc->max_cursor_width ||
	    fb->height > new_acrtc->max_cursor_height) {
		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
				 new_plane_state->fb->width,
				 new_plane_state->fb->height);
		return -EINVAL;
	}
	if (new_plane_state->src_w != fb->width << 16 ||
	    new_plane_state->src_h != fb->height << 16) {
		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
		return -EINVAL;
	}

	/* Pitch in pixels */
	pitch = fb->pitches[0] / fb->format->cpp[0];

	if (fb->width != pitch) {
		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
				 fb->width, pitch);
		return -EINVAL;
	}

	switch (pitch) {
	case 64:
	case 128:
	case 256:
		/* FB pitch is supported by the cursor plane */
		break;
	default:
		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
		return -EINVAL;
	}

	/*
	 * Core DRM takes care of checking FB modifiers, so we only need to
	 * check tiling flags when the FB doesn't have a modifier.
	 */
	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
		if (adev->family < AMDGPU_FAMILY_AI) {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
		} else {
			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
		}
		if (!linear) {
			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
			return -EINVAL;
		}
	}

	return 0;
}
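/*
 * A worked example of the pitch check above: a 64x64 ARGB8888 cursor has
 * fb->pitches[0] = 64 * 4 = 256 bytes and fb->format->cpp[0] = 4, so
 *
 *	pitch = 256 / 4 = 64 pixels
 *
 * which equals fb->width and is one of the supported pitches (64, 128,
 * 256), so the FB is accepted; a hypothetical 65-pixel-wide cursor would
 * already fail the width == pitch test or the switch above.
 */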
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed,
				 bool *is_top_most_overlay)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;

	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		if (!enable || !new_plane_crtc ||
		    drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}

		if (dm_old_plane_state->dc_state)
			dc_plane_state_release(dm_old_plane_state->dc_state);

		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		/* Block the top-most plane from being a video plane */
		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
			if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) {
				/* Release the plane state so it is not leaked on this error path. */
				dc_plane_state_release(dc_new_plane_state);
				return -EINVAL;
			}

			*is_top_most_overlay = false;
		}

		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
				 plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);

		/*
		 * Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}

	return ret;
}

static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
				       int *src_w, int *src_h)
{
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_90:
	case DRM_MODE_ROTATE_270:
		*src_w = plane_state->src_h >> 16;
		*src_h = plane_state->src_w >> 16;
		break;
	case DRM_MODE_ROTATE_0:
	case DRM_MODE_ROTATE_180:
	default:
		*src_w = plane_state->src_w >> 16;
		*src_h = plane_state->src_h >> 16;
		break;
	}
}

static void
dm_get_plane_scale(struct drm_plane_state *plane_state,
		   int *out_plane_scale_w, int *out_plane_scale_h)
{
	int plane_src_w, plane_src_h;

	dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
	*out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
	*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
}
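/*
 * A worked example of the per-mille scale above: a 64x64 cursor FB shown
 * at 64x64 on screen gives crtc_w * 1000 / src_w = 64000 / 64 = 1000
 * (1.0x). If the underlying plane is a 1920x1080 surface upscaled to
 * 3840x2160, its scale is 3840 * 1000 / 1920 = 2000 (2.0x), so
 * dm_check_crtc_cursor() below rejects the commit: the shared cursor
 * pipe would scale the cursor by 2x as well.
 */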
static int dm_check_crtc_cursor(struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *new_crtc_state)
{
	struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane_state *new_cursor_state, *new_underlying_state;
	int i;
	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
	bool any_relevant_change = false;

	/*
	 * On DCE and DCN there is no dedicated hardware cursor plane. We get a
	 * cursor per pipe, but it inherits the scaling and positioning from
	 * the underlying pipe, so check that the cursor plane's blending
	 * properties match the underlying planes'.
	 */

	/* If no plane was enabled or changed its scaling, no need to check again */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		int new_scale_w, new_scale_h, old_scale_w, old_scale_h;

		if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
			continue;

		if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
			any_relevant_change = true;
			break;
		}

		if (new_plane_state->fb == old_plane_state->fb &&
		    new_plane_state->crtc_w == old_plane_state->crtc_w &&
		    new_plane_state->crtc_h == old_plane_state->crtc_h)
			continue;

		dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
		dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);

		if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
			any_relevant_change = true;
			break;
		}
	}

	if (!any_relevant_change)
		return 0;

	new_cursor_state = drm_atomic_get_plane_state(state, cursor);
	if (IS_ERR(new_cursor_state))
		return PTR_ERR(new_cursor_state);

	if (!new_cursor_state->fb)
		return 0;

	dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);

	/*
	 * Need to check all enabled planes, even if this commit doesn't change
	 * their state
	 */
	i = drm_atomic_add_affected_planes(state, crtc);
	if (i)
		return i;

	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
			continue;

		/* Ignore disabled planes */
		if (!new_underlying_state->fb)
			continue;

		dm_get_plane_scale(new_underlying_state,
				   &underlying_scale_w, &underlying_scale_h);

		if (cursor_scale_w != underlying_scale_w ||
		    cursor_scale_h != underlying_scale_h) {
			drm_dbg_atomic(crtc->dev,
				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
			return -EINVAL;
		}

		/* If this plane covers the whole CRTC, no need to check planes underneath */
		if (new_underlying_state->crtc_x <= 0 &&
		    new_underlying_state->crtc_y <= 0 &&
		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
			break;
	}

	return 0;
}

static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state, *old_conn_state;
	struct amdgpu_dm_connector *aconnector = NULL;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
		if (!conn_state->crtc)
			conn_state = old_conn_state;

		if (conn_state->crtc != crtc)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);
		if (!aconnector->mst_output_port || !aconnector->mst_root)
			aconnector = NULL;
		else
			break;
	}

	if (!aconnector)
		return 0;

	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}

/**
 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
 *
 * @dev: The DRM device
 * @state: The atomic state to commit
 *
 * Validate that the given atomic state is programmable by DC into hardware.
 * This involves constructing a &struct dc_state reflecting the new hardware
 * state we wish to commit, then querying DC to see if it is programmable. It's
 * important not to modify the existing DC state; otherwise, atomic_check
 * may unexpectedly commit hardware changes.
 *
 * When validating the DC state, it's important that the right locks are
 * acquired. For a full update, which removes/adds/updates streams on one
 * CRTC while flipping on another, acquiring the global lock guarantees that
 * any such full-update commit will wait for the completion of any outstanding
 * flip using DRM's synchronization events.
 *
 * Note that DM adds the affected connectors for all CRTCs in state, when that
 * might not seem necessary. This is because DC stream creation requires the
 * DC sink, which is tied to the DRM connector state. Cleaning this up should
 * be possible but non-trivial - a possible TODO item.
 *
 * Return: 0 on success, a negative error code if validation failed.
 */
static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_atomic_state *dm_state = NULL;
	struct dc *dc = adev->dm.dc;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	enum dc_status status;
	int ret, i;
	bool lock_and_validation_needed = false;
	bool is_top_most_overlay = true;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	struct dsc_mst_fairness_vars vars[MAX_PIPES];

	trace_amdgpu_dm_atomic_check_begin(state);

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
		goto fail;
	}

	/* Check connector changes */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);

		/* Skip connectors that are disabled or part of a modeset already. */
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
		if (IS_ERR(new_crtc_state)) {
			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
			ret = PTR_ERR(new_crtc_state);
			goto fail;
		}

		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
		    dm_old_con_state->scaling != dm_new_con_state->scaling)
			new_crtc_state->connectors_changed = true;
	}

	if (dc_resource_is_dsc_encoding_supported(dc)) {
		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				ret = add_affected_mst_dsc_crtcs(state, crtc);
				if (ret) {
					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
					goto fail;
				}
			}
		}
	}
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed &&
		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
		    !dm_old_crtc_state->dsc_force_changed)
			continue;

		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
			goto fail;
		}

		if (!new_crtc_state->enable)
			continue;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
			goto fail;
		}

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
			goto fail;
		}

		if (dm_old_crtc_state->dsc_force_changed)
			new_crtc_state->mode_changed = true;
	}

	/*
	 * Add all primary and overlay planes on the CRTC to the state
	 * whenever a plane is enabled to maintain correct z-ordering
	 * and to enable fast surface updates.
	 */
	drm_for_each_crtc(crtc, dev) {
		bool modified = false;

		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			if (new_plane_state->crtc == crtc ||
			    old_plane_state->crtc == crtc) {
				modified = true;
				break;
			}
		}

		if (!modified)
			continue;

		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
			if (plane->type == DRM_PLANE_TYPE_CURSOR)
				continue;

			new_plane_state =
				drm_atomic_get_plane_state(state, plane);

			if (IS_ERR(new_plane_state)) {
				ret = PTR_ERR(new_plane_state);
				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
				goto fail;
			}
		}
	}

	/*
	 * DC consults the zpos (layer_index in DC terminology) to determine the
	 * hw plane on which to enable the hw cursor (see
	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
	 * the atomic state, so call the drm helper to normalize zpos.
	 */
	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret) {
		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
		goto fail;
	}

	/* Remove existing planes if they are modified */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		if (old_plane_state->fb && new_plane_state->fb &&
		    get_mem_type(old_plane_state->fb) !=
		    get_mem_type(new_plane_state->fb))
			lock_and_validation_needed = true;

		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    false,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	/* Disable all CRTCs which require disable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   false,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Enable all CRTCs which require enable */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_update_crtc_state(&adev->dm, state, crtc,
					   old_crtc_state,
					   new_crtc_state,
					   true,
					   &lock_and_validation_needed);
		if (ret) {
			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
			goto fail;
		}
	}

	/* Add new/modified planes */
	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
		ret = dm_update_plane_state(dc, state, plane,
					    old_plane_state,
					    new_plane_state,
					    true,
					    &lock_and_validation_needed,
					    &is_top_most_overlay);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
			goto fail;
		}
	}

	if (dc_resource_is_dsc_encoding_supported(dc)) {
		ret = pre_validate_dsc(state, &dm_state, vars);
		if (ret != 0)
			goto fail;
	}

	/* Run this here since we want to validate the streams we created */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret) {
		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
		goto fail;
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->mpo_requested)
			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
	}

	/* Check cursor plane scaling */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
			goto fail;
		}
	}

	if (state->legacy_cursor_update) {
		/*
		 * This is a fast cursor update coming from the plane update
		 * helper, check if it can be done asynchronously for better
		 * performance.
		 */
		state->async_update =
			!drm_atomic_helper_async_check(dev, state);

		/*
		 * Skip the remaining global validation if this is an async
		 * update. Cursor updates can be done without affecting
		 * state or bandwidth calcs and this avoids the performance
		 * penalty of locking the private state object and
		 * allocating a new dc_state.
		 */
		if (state->async_update)
			return 0;
	}

	/* Check scaling and underscan changes */
	/*
	 * TODO: Scaling-change validation was removed due to the inability to
	 * commit a new stream into the context w/o causing a full reset. Need
	 * to decide how to handle this.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/* Set the slot info for each mst_state based on the link encoding format */
	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		struct amdgpu_dm_connector *aconnector;
		struct drm_connector *connector;
		struct drm_connector_list_iter iter;
		u8 link_coding_cap;

		drm_connector_list_iter_begin(dev, &iter);
		drm_for_each_connector_iter(connector, &iter) {
			if (connector->index == mst_state->mgr->conn_base_id) {
				aconnector = to_amdgpu_dm_connector(connector);
				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
				drm_dp_mst_update_slots(mst_state, link_coding_cap);

				break;
			}
		}
		drm_connector_list_iter_end(&iter);
	}
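	/*
	 * For context (DP background stated here as an assumption, not
	 * derived from this file): an 8b/10b MST link exposes 63 usable
	 * payload time slots, while 128b/132b (DP 2.x UHBR rates) exposes
	 * 64, so the pair of calls above lets the MST payload bookkeeping
	 * match the negotiated link encoding before VCPI slots are
	 * allocated:
	 *
	 *	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(link);
	 *	drm_dp_mst_update_slots(mst_state, link_coding_cap);
	 */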
	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We currently have to stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
			goto fail;
		}

		ret = do_aquire_global_lock(dev, state);
		if (ret) {
			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
			goto fail;
		}

		if (dc_resource_is_dsc_encoding_supported(dc)) {
			ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
			if (ret) {
				DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
				ret = -EINVAL;
				goto fail;
			}
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
			goto fail;
		}

		/*
		 * Perform validation of the MST topology in the state:
		 * we need to perform the MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance of
		 * getting stuck in an infinite loop and hanging eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret) {
			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
			goto fail;
		}
		status = dc_validate_global_state(dc, dm_state->context, true);
		if (status != DC_OK) {
			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
					 dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, affect global validation, and can have their
		 * commit work done in parallel with other commits not touching
		 * the same resource. If we have a new DC context as part of
		 * the DM atomic state from validation, we need to free it and
		 * retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs-1;

				dm_atomic_destroy_state(obj,
							state->private_objs[i].state);

				/*
				 * If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		/*
		 * Only allow async flips for fast updates that don't change
		 * the FB pitch, the DCC state, rotation, etc.
		 */
		if (new_crtc_state->async_flip && lock_and_validation_needed) {
			drm_dbg_atomic(crtc->dev,
				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
				       crtc->base.id, crtc->name);
			ret = -EINVAL;
			goto fail;
		}

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
	}

	/* Must have succeeded to reach this point */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	u8 dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}
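/*
 * For reference, the capability probed above lives in the DPCD
 * DOWN_STREAM_PORT_COUNT register: the DP_MSA_TIMING_PAR_IGNORED bit
 * advertises that the sink can ignore the MSA video timing parameters,
 * a precondition for DP Adaptive-Sync. A sketch of the same check via the
 * generic DRM DP helpers (assumes an initialized struct drm_dp_aux *aux,
 * which amdgpu_dm does not use here):
 *
 *	u8 val;
 *
 *	if (drm_dp_dpcd_readb(aux, DP_DOWN_STREAM_PORT_COUNT, &val) == 1 &&
 *	    (val & DP_MSA_TIMING_PAR_IGNORED))
 *		capable = true;
 */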
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
				    unsigned int offset,
				    unsigned int total_length,
				    u8 *data,
				    unsigned int length,
				    struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->cea_total_length = total_length;
	memcpy(input->payload, data, length);

	res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
				u8 *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* Send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i+8 == len) {
			/* The whole EDID block has been sent, expect a result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* AMD VSDB found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* Not an AMD VSDB */
			return false;
		}

		/* Check for an ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
				u8 *edid_ext, int len,
				struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* Send the extension block to the DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* Send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
			   u8 *edid_ext, int len,
			   struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
	bool ret;

	mutex_lock(&adev->dm.dc_lock);
	if (adev->dm.dmub_srv)
		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
	mutex_unlock(&adev->dm.dc_lock);
	return ret;
}
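/*
 * A worked example of the chunking above: a CEA extension block is
 * EDID_LENGTH (128) bytes, so it is streamed to the firmware in sixteen
 * 8-byte chunks at offsets 0, 8, ..., 120. Every chunk is acknowledged,
 * and only the final chunk (i + 8 == len) produces the parsed result:
 *
 *	for (i = 0; i < 128; i += 8)
 *		send_chunk(&edid_ext[i]);	// hypothetical helper
 *	// after the chunk at offset 120: read back the AMD VSDB result
 */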
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	u8 *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

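/*
 * After a successful parse the FreeSync range lands in two places: the
 * generic connector->display_info.monitor_range consumed by DRM, and the
 * min/max_vfreq fields of amdgpu_dm_connector consumed by mod_freesync.
 * For a hypothetical 48-144 Hz HDMI panel the result would be (sketch,
 * illustrative values only):
 *
 *	connector->display_info.monitor_range.min_vfreq == 48
 *	connector->display_info.monitor_range.max_vfreq == 144
 *	amdgpu_dm_connector->min_vfreq == 48
 *	amdgpu_dm_connector->max_vfreq == 144
 *	freesync_capable == true	(the range spans more than 10 Hz)
 */
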
/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities
 *
 * @connector: Connector to query.
 * @edid: EDID from monitor
 *
 * Amdgpu supports FreeSync on both DP and HDMI displays, and some of the
 * display information must be tracked in amdgpu_dm's internal data
 * structures. This function determines the connector type and sets the
 * FreeSync parameters accordingly.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	bool freesync_capable = false;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	/* fall back to the emulated sink when no physical sink is attached */
	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
		     sink->sink_signal == SIGNAL_TYPE_EDP)) {
		bool edid_check_required = false;

		if (is_dp_capable_without_timing_msa(adev->dm.dc,
						     amdgpu_dm_connector)) {
			if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
				freesync_capable = true;
				amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
				amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
			} else {
				edid_check_required = edid->version > 1 ||
						      (edid->version == 1 &&
						       edid->revision > 1);
			}
		}

		if (edid_check_required) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;

				/* Check if monitor has continuous frequency mode */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flagged range limits only: if flags == 1,
				 * no additional timing information is provided.
				 * Default GTF, GTF secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				if (edid->revision >= 4) {
					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
						connector->display_info.monitor_range.min_vfreq += 255;
					if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
						connector->display_info.monitor_range.max_vfreq += 255;
				}

				amdgpu_dm_connector->min_vfreq =
					connector->display_info.monitor_range.min_vfreq;
				amdgpu_dm_connector->max_vfreq =
					connector->display_info.monitor_range.max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}

		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);

		if (vsdb_info.replay_mode) {
			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
		}

	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
			amdgpu_dm_connector->pack_sdp_v1_3 = true;
			amdgpu_dm_connector->as_type = as_type;
			amdgpu_dm_connector->vsdb_info = vsdb_info;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

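/*
 * Register access helpers routed through CGS so that every DC register
 * access is traced. When DM_CHECK_ADDR_0 is defined, accesses to register
 * offset 0 (typically a sign of an uninitialized register field in DC)
 * are trapped and rejected.
 */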
address = 0"); 10895 return; 10896 } 10897 #endif 10898 cgs_write_register(ctx->cgs_device, address, value); 10899 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); 10900 } 10901 10902 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, 10903 const char *func_name) 10904 { 10905 u32 value; 10906 #ifdef DM_CHECK_ADDR_0 10907 if (address == 0) { 10908 DC_ERR("invalid register read; address = 0\n"); 10909 return 0; 10910 } 10911 #endif 10912 10913 if (ctx->dmub_srv && 10914 ctx->dmub_srv->reg_helper_offload.gather_in_progress && 10915 !ctx->dmub_srv->reg_helper_offload.should_burst_write) { 10916 ASSERT(false); 10917 return 0; 10918 } 10919 10920 value = cgs_read_register(ctx->cgs_device, address); 10921 10922 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); 10923 10924 return value; 10925 } 10926 10927 int amdgpu_dm_process_dmub_aux_transfer_sync( 10928 struct dc_context *ctx, 10929 unsigned int link_index, 10930 struct aux_payload *payload, 10931 enum aux_return_code_type *operation_result) 10932 { 10933 struct amdgpu_device *adev = ctx->driver_context; 10934 struct dmub_notification *p_notify = adev->dm.dmub_notify; 10935 int ret = -1; 10936 10937 mutex_lock(&adev->dm.dpia_aux_lock); 10938 if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { 10939 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; 10940 goto out; 10941 } 10942 10943 if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { 10944 DRM_ERROR("wait_for_completion_timeout timeout!"); 10945 *operation_result = AUX_RET_ERROR_TIMEOUT; 10946 goto out; 10947 } 10948 10949 if (p_notify->result != AUX_RET_SUCCESS) { 10950 /* 10951 * Transient states before tunneling is enabled could 10952 * lead to this error. We can ignore this for now. 
10953 */ 10954 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { 10955 DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", 10956 payload->address, payload->length, 10957 p_notify->result); 10958 } 10959 *operation_result = AUX_RET_ERROR_INVALID_REPLY; 10960 goto out; 10961 } 10962 10963 10964 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; 10965 if (!payload->write && p_notify->aux_reply.length && 10966 (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { 10967 10968 if (payload->length != p_notify->aux_reply.length) { 10969 DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", 10970 p_notify->aux_reply.length, 10971 payload->address, payload->length); 10972 *operation_result = AUX_RET_ERROR_INVALID_REPLY; 10973 goto out; 10974 } 10975 10976 memcpy(payload->data, p_notify->aux_reply.data, 10977 p_notify->aux_reply.length); 10978 } 10979 10980 /* success */ 10981 ret = p_notify->aux_reply.length; 10982 *operation_result = p_notify->result; 10983 out: 10984 reinit_completion(&adev->dm.dmub_aux_transfer_done); 10985 mutex_unlock(&adev->dm.dpia_aux_lock); 10986 return ret; 10987 } 10988 10989 int amdgpu_dm_process_dmub_set_config_sync( 10990 struct dc_context *ctx, 10991 unsigned int link_index, 10992 struct set_config_cmd_payload *payload, 10993 enum set_config_status *operation_result) 10994 { 10995 struct amdgpu_device *adev = ctx->driver_context; 10996 bool is_cmd_complete; 10997 int ret; 10998 10999 mutex_lock(&adev->dm.dpia_aux_lock); 11000 is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, 11001 link_index, payload, adev->dm.dmub_notify); 11002 11003 if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { 11004 ret = 0; 11005 *operation_result = adev->dm.dmub_notify->sc_status; 11006 } else { 11007 DRM_ERROR("wait_for_completion_timeout timeout!"); 11008 ret = -1; 11009 *operation_result = SET_CONFIG_UNKNOWN_ERROR; 11010 } 11011 11012 if (!is_cmd_complete) 11013 reinit_completion(&adev->dm.dmub_aux_transfer_done); 11014 mutex_unlock(&adev->dm.dpia_aux_lock); 11015 return ret; 11016 } 11017 11018 /* 11019 * Check whether seamless boot is supported. 11020 * 11021 * So far we only support seamless boot on CHIP_VANGOGH. 11022 * If everything goes well, we may consider expanding 11023 * seamless boot to other ASICs. 11024 */ 11025 bool check_seamless_boot_capability(struct amdgpu_device *adev) 11026 { 11027 switch (adev->ip_versions[DCE_HWIP][0]) { 11028 case IP_VERSION(3, 0, 1): 11029 if (!adev->mman.keep_stolen_vga_memory) 11030 return true; 11031 break; 11032 default: 11033 break; 11034 } 11035 11036 return false; 11037 } 11038 11039 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 11040 { 11041 return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); 11042 } 11043 11044 bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 11045 { 11046 return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); 11047 } 11048