/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <linux/dmi.h>

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>

#include <acpi/video.h>

#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "soc15_common.h"
#include "vega10_ip_offset.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
"modules/inc/mod_freesync.h" 108 #include "modules/power/power_helpers.h" 109 110 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 111 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 112 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 113 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 114 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 115 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 116 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 117 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 118 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 119 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 120 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 121 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 122 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 123 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 124 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 125 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 126 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" 127 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); 128 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 129 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 130 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 131 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 132 133 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 134 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 135 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 136 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 137 138 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 139 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 140 141 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 142 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 143 144 /* Number of bytes in PSP header for firmware. */ 145 #define PSP_HEADER_BYTES 0x100 146 147 /* Number of bytes in PSP footer for firmware. */ 148 #define PSP_FOOTER_BYTES 0x100 149 150 /** 151 * DOC: overview 152 * 153 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 154 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 155 * requests into DC requests, and DC responses into DRM responses. 156 * 157 * The root control structure is &struct amdgpu_display_manager. 

/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */
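
/*
 * Editorial illustration of that liaison role: a DRM atomic commit enters via
 * amdgpu_dm_atomic_check()/amdgpu_dm_atomic_commit_tail() (declared below) and
 * is translated into DC stream/surface updates, e.g. through
 * update_planes_and_stream_adapter() further down in this file.
 */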

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);

static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    u32 link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static void handle_hpd_rx_irq(void *param);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int crtc - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	struct amdgpu_crtc *acrtc = NULL;

	if (crtc >= adev->mode_info.num_crtc)
		return 0;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	u32 v_blank_start, v_blank_end, h_position, v_position;
	struct amdgpu_crtc *acrtc = NULL;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	acrtc = adev->mode_info.crtcs[crtc];

	if (!acrtc->dm_irq_params.stream) {
		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
			  crtc);
		return 0;
	}

	/*
	 * TODO rework base driver to use values directly.
	 * for now parse it back into reg-format
	 */
	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
				 &v_blank_start,
				 &v_blank_end,
				 &h_position,
				 &v_position);

	*position = v_position | (h_position << 16);
	*vbl = v_blank_start | (v_blank_end << 16);

	return 0;
}
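
/*
 * Editorial example of the packing above (values hypothetical): with
 * h_position = 0x50 and v_position = 0x20, *position becomes
 * (0x50 << 16) | 0x20 = 0x00500020; *vbl likewise carries v_blank_start in
 * the low word and v_blank_end in the high word.
 */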

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (WARN_ON(otg_inst == -1))
		return adev->mode_info.crtcs[0];

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
					      struct dm_crtc_state *new_state)
{
	if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
		return true;
	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
		return true;
	else
		return false;
}

static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
					int planes_count)
{
	int i, j;

	for (i = 0, j = planes_count - 1; i < j; i++, j--)
		swap(array_of_surface_update[i], array_of_surface_update[j]);
}

/**
 * update_planes_and_stream_adapter() - Send planes to be updated in DC
 *
 * DC has a generic way to update planes and stream via the
 * dc_update_planes_and_stream function; however, DM might need some
 * adjustments and preparation before calling it. This function is a wrapper
 * for dc_update_planes_and_stream that does any required configuration
 * before passing control to DC.
 *
 * @dc: Display Core control structure
 * @update_type: specify whether it is FULL/MEDIUM/FAST update
 * @planes_count: planes count to update
 * @stream: stream state
 * @stream_update: stream update
 * @array_of_surface_update: dc surface update pointer
 *
 * Return: result of dc_update_planes_and_stream()
 */
static inline bool update_planes_and_stream_adapter(struct dc *dc,
						    int update_type,
						    int planes_count,
						    struct dc_stream_state *stream,
						    struct dc_stream_update *stream_update,
						    struct dc_surface_update *array_of_surface_update)
{
	reverse_planes_order(array_of_surface_update, planes_count);

	/*
	 * Previous frame finished and HW is ready for optimization.
	 */
	if (update_type == UPDATE_TYPE_FAST)
		dc_post_update_surfaces_to_stream(dc);

	return dc_update_planes_and_stream(dc,
					   array_of_surface_update,
					   planes_count,
					   stream,
					   stream_update);
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: interrupt parameters
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	u32 vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DC_LOG_PFLIP("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
			     amdgpu_crtc->pflip_status,
			     AMDGPU_FLIP_SUBMITTED,
			     amdgpu_crtc->crtc_id,
			     amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	WARN_ON(!e);

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
		     amdgpu_crtc->crtc_id, amdgpu_crtc,
		     vrr_active, (int)!e);
}

static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	struct drm_device *drm_dev;
	struct drm_vblank_crtc *vblank;
	ktime_t frame_duration_ns, previous_timestamp;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
		drm_dev = acrtc->base.dev;
		vblank = &drm_dev->vblank[acrtc->base.index];
		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
		frame_duration_ns = vblank->time - previous_timestamp;

		if (frame_duration_ns > 0) {
			trace_amdgpu_refresh_rate_track(acrtc->base.index,
						frame_duration_ns,
						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
			atomic64_set(&irq_params->previous_timestamp, vblank->time);
		}

		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * only now, after the front-porch. This will also deliver
		 * the page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			amdgpu_dm_crtc_handle_vblank(acrtc);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
						adev->dm.freesync_module,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
						adev->dm.dc,
						acrtc->dm_irq_params.stream,
						&acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);

	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		amdgpu_dm_crtc_handle_vblank(acrtc);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
	    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback.
 * Copies the dmub notification to DM, where it is read by the AUX command
 * issuing thread, and signals the event to wake up that thread.
 */
static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
					struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through
 * the link index and calls the helper to do the processing.
 */
static void dmub_hpd_callback(struct amdgpu_device *adev,
			      struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct amdgpu_dm_connector *hpd_aconnector = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	u8 link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	link_index = notify->link_index;
	link = adev->dm.dc->links[link_index];
	dev = adev->dm.ddev;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			if (notify->type == DMUB_NOTIFICATION_HPD)
				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
			else
				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
						notify->type, link_index);

			hpd_aconnector = aconnector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (hpd_aconnector) {
		if (notify->type == DMUB_NOTIFICATION_HPD)
			handle_hpd_irq_helper(hpd_aconnector);
		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
			handle_hpd_rx_irq(hpd_aconnector);
	}
}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator for whether callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 *
 * Return: true if successfully registered, false if there is an existing registration
 */
static bool register_dmub_notify_callback(struct amdgpu_device *adev,
					  enum dmub_notification_type type,
					  dmub_notify_interrupt_callback_t callback,
					  bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}
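
/*
 * Editorial note: amdgpu_dm_init() below registers dmub_aux_setconfig_callback
 * without offloading, but dmub_hpd_callback with offloading, so HPD processing
 * runs from the delayed_hpd_wq worker rather than in interrupt context (see
 * dm_dmub_outbox1_low_irq()).
 */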

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
		dmub_hpd_wrk->dmub_notify);
	}

	kfree(dmub_hpd_wrk->dmub_notify);
	kfree(dmub_hpd_wrk);
}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching registered DMUB notification
 * callbacks and draining the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	u32 count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;
	struct dc_link *plink = NULL;

	if (dc_enable_dmub_notifications(adev->dm.dc) &&
	    irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {

		do {
			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
				DRM_ERROR("DM: notify type %d invalid!", notify.type);
				continue;
			}
			if (!dm->dmub_callback[notify.type]) {
				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
				continue;
			}
			if (dm->dmub_thread_offload[notify.type] == true) {
				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
				if (!dmub_hpd_wrk) {
					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
					return;
				}
				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
								    GFP_ATOMIC);
				if (!dmub_hpd_wrk->dmub_notify) {
					kfree(dmub_hpd_wrk);
					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
					return;
				}
				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
				dmub_hpd_wrk->adev = adev;
				if (notify.type == DMUB_NOTIFICATION_HPD) {
					plink = adev->dm.dc->links[notify.link_index];
					if (plink) {
						plink->hpd_status =
							notify.hpd_status == DP_HPD_PLUG;
					}
				}
				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
			} else {
				dm->dmub_callback[notify.type](adev, &notify);
			}
		} while (notify.pending_notification);
	}

	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count,
					 entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	if (count > DMUB_TRACE_MAX_READ)
		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}
	}
}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	u32 i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
	status = dmub_srv_hw_reset(dmub_srv);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Error resetting DMUB HW: %d\n", status);

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->vm_manager.vram_base_offset;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):
		hw_params.dpia_supported = true;
		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	if (!adev->dm.dc->ctx->dmub_srv)
		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	enum dmub_status status;
	bool init;

	if (!dmub_srv) {
		/* DMUB isn't supported on the ASIC. */
		return;
	}

	status = dmub_srv_is_hw_init(dmub_srv, &init);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("DMUB hardware init check failed: %d\n", status);

	if (status == DMUB_STATUS_OK && init) {
		/* Wait for firmware load to finish. */
		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
		if (status != DMUB_STATUS_OK)
			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
	} else {
		/* Perform the full hardware initialization. */
		dm_dmub_hw_init(adev);
	}
}

static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	u64 pt_base;
	u32 logical_addr_low;
	u32 logical_addr_high;
	u32 agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	memset(pa_config, 0, sizeof(*pa_config));

	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;

	/* AGP aperture is disabled */
	if (agp_bot == agp_top) {
		logical_addr_low = adev->gmc.fb_start >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that prevents it from using
			 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
			 * As a workaround, increase the system aperture high
			 * address (by 1) to get rid of the VM fault and
			 * hardware hang.
			 */
			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
		else
			logical_addr_high = adev->gmc.fb_end >> 18;
	} else {
		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
		if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
				       AMD_APU_IS_RENOIR |
				       AMD_APU_IS_GREEN_SARDINE))
			/*
			 * Raven2 has a HW issue that prevents it from using
			 * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
			 * As a workaround, increase the system aperture high
			 * address (by 1) to get rid of the VM fault and
			 * hardware hang.
			 */
			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
		else
			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
	}

	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
						   AMDGPU_GPU_PAGE_SHIFT);
	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
						  AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
						 AMDGPU_GPU_PAGE_SHIFT);
	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
						AMDGPU_GPU_PAGE_SHIFT);
	page_table_base.high_part = upper_32_bits(pt_base);
	page_table_base.low_part = lower_32_bits(pt_base);

	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}
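
/*
 * Editorial note, inferred from the conversions above: system aperture
 * addresses are handled in 256KB units (>> 18 / << 18), the AGP aperture in
 * 16MB units (>> 24 / << 24), and GART page table addresses in 4KB pages
 * (<< 12). For example, fb_start = 0x800000000 yields logical_addr_low =
 * 0x20000, which << 18 restores to 0x800000000.
 */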

static void force_connector_state(
	struct amdgpu_dm_connector *aconnector,
	enum drm_connector_force force_state)
{
	struct drm_connector *connector = &aconnector->base;

	mutex_lock(&connector->dev->mode_config.mutex);
	aconnector->base.force = force_state;
	mutex_unlock(&connector->dev->mode_config.mutex);

	mutex_lock(&aconnector->hpd_lock);
	drm_kms_helper_connector_hotplug_event(connector);
	mutex_unlock(&aconnector->hpd_lock);
}

static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
	struct hpd_rx_irq_offload_work *offload_work;
	struct amdgpu_dm_connector *aconnector;
	struct dc_link *dc_link;
	struct amdgpu_device *adev;
	enum dc_connection_type new_connection_type = dc_connection_none;
	unsigned long flags;
	union test_response test_response;

	memset(&test_response, 0, sizeof(test_response));

	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
	aconnector = offload_work->offload_wq->aconnector;

	if (!aconnector) {
		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
		goto skip;
	}

	adev = drm_to_adev(aconnector->base.dev);
	dc_link = aconnector->dc_link;

	mutex_lock(&aconnector->hpd_lock);
	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
		DRM_ERROR("KMS: Failed to detect connector\n");
	mutex_unlock(&aconnector->hpd_lock);

	if (new_connection_type == dc_connection_none)
		goto skip;

	if (amdgpu_in_reset(adev))
		goto skip;

	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
		goto skip;
	}

	mutex_lock(&adev->dm.dc_lock);
	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
		dc_link_dp_handle_automated_test(dc_link);

		if (aconnector->timing_changed) {
			/* force connector disconnect and reconnect */
			force_connector_state(aconnector, DRM_FORCE_OFF);
			msleep(100);
			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
		}

		test_response.bits.ACK = 1;

		core_link_write_dpcd(
			dc_link,
			DP_TEST_RESPONSE,
			&test_response.raw,
			sizeof(test_response));
	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
		/* offload_work->data comes from handle_hpd_rx_irq() via
		 * schedule_hpd_rx_offload_work(); this is the deferred handler
		 * for an hpd short pulse. By this point the link status may
		 * have changed, so read the latest link status back from the
		 * DPCD registers. If the link status is good, skip running
		 * link training again.
		 */
		union hpd_irq_data irq_data;

		memset(&irq_data, 0, sizeof(irq_data));

		/* Before dc_link_dp_handle_link_loss(), allow a new link-loss
		 * handling request to be added to the work queue, in case the
		 * link is lost again at the end of dc_link_dp_handle_link_loss().
		 */
		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
		offload_work->offload_wq->is_handling_link_loss = false;
		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);

		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
		    dc_link_check_link_loss_status(dc_link, &irq_data))
			dc_link_dp_handle_link_loss(dc_link);
	}
	mutex_unlock(&adev->dm.dc_lock);

skip:
	kfree(offload_work);
}

static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
{
	int max_caps = dc->caps.max_links;
	int i = 0;
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;

	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);

	if (!hpd_rx_offload_wq)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		hpd_rx_offload_wq[i].wq =
			create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");

		if (hpd_rx_offload_wq[i].wq == NULL) {
			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
			goto out_err;
		}

		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
	}

	return hpd_rx_offload_wq;

out_err:
	for (i = 0; i < max_caps; i++) {
		if (hpd_rx_offload_wq[i].wq)
			destroy_workqueue(hpd_rx_offload_wq[i].wq);
	}
	kfree(hpd_rx_offload_wq);
	return NULL;
}

struct amdgpu_stutter_quirk {
	u16 chip_vendor;
	u16 chip_device;
	u16 subsys_vendor;
	u16 subsys_device;
	u8 revision;
};

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	{ 0, 0, 0, 0, 0 },
};

static bool dm_should_disable_stutter(struct pci_dev *pdev)
{
	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;

	while (p && p->chip_device != 0) {
		if (pdev->vendor == p->chip_vendor &&
		    pdev->device == p->chip_device &&
		    pdev->subsystem_vendor == p->subsys_vendor &&
		    pdev->subsystem_device == p->subsys_device &&
		    pdev->revision == p->revision) {
			return true;
		}
		++p;
	}
	return false;
}
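
/*
 * Editorial sketch (IDs hypothetical): a new quirk entry would be added
 * before the all-zero sentinel that terminates the walk above, e.g.
 *
 *	{ 0x1002, 0x15dd, 0x1002, 0x1234, 0xc8 },
 *
 * matching PCI vendor/device, subsystem vendor/device and revision.
 */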

static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
		},
	},
	{}
	/* TODO: refactor this from a fixed table to a dynamic option */
};

static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
{
	const struct dmi_system_id *dmi_id;

	dm->aux_hpd_discon_quirk = false;

	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
	if (dmi_id) {
		dm->aux_hpd_discon_quirk = true;
		DRM_INFO("aux_hpd_discon_quirk attached\n");
	}
}

static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
	struct dc_callback_init init_params;
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
	memset(&init_params, 0, sizeof(init_params));

	mutex_init(&adev->dm.dpia_aux_lock);
	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
	init_data.asic_id.chip_id = adev->pdev->device;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 1, 0):
		switch (adev->dm.dmcub_fw_version) {
		case 0: /* development */
		case 0x1: /* linux-firmware.git hash 6d9f399 */
		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
			init_data.flags.disable_dmcu = false;
			break;
		default:
			init_data.flags.disable_dmcu = true;
		}
		break;
	case IP_VERSION(2, 0, 3):
		init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		init_data.flags.gpu_vm_support = true;
		break;
	default:
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			/* enable S/G on PCO and RV2 */
			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
			    (adev->apu_flags & AMD_APU_IS_PICASSO))
				init_data.flags.gpu_vm_support = true;
			break;
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			init_data.flags.gpu_vm_support = true;
			break;
		default:
			break;
		}
		break;
	}
	if (init_data.flags.gpu_vm_support &&
	    (amdgpu_sg_display == 0))
		init_data.flags.gpu_vm_support = false;

	if (init_data.flags.gpu_vm_support)
		adev->mode_info.gpu_vm_support = true;

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
		init_data.flags.edp_no_power_sequencing = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;

	init_data.flags.seamless_boot_edp_requested = false;

	if (check_seamless_boot_capability(adev)) {
		init_data.flags.seamless_boot_edp_requested = true;
		init_data.flags.allow_seamless_boot_optimization = true;
		DRM_INFO("Seamless boot condition check passed\n");
	}

	init_data.flags.enable_mipi_converter_optimization = true;

	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];

	INIT_LIST_HEAD(&adev->dm.da_list);

	retrieve_dmi_info(&adev->dm);

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
	if (dm_should_disable_stutter(adev->pdev))
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
		adev->dm.dc->debug.force_subvp_mclk_switch = true;

	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;

	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
	adev->dm.dc->debug.ignore_cable_id = true;
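
	/*
	 * Editorial note: the amdgpu_dc_debug_mask tests above are driven by
	 * the amdgpu.dcdebugmask module parameter; the DC_* bit definitions
	 * live in amd_shared.h.
	 */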
1743 */ 1744 adev->dm.dc->debug.temp_mst_deallocation_sequence = true; 1745 1746 if (adev->dm.dc->caps.dp_hdmi21_pcon_support) 1747 DRM_INFO("DP-HDMI FRL PCON supported\n"); 1748 1749 r = dm_dmub_hw_init(adev); 1750 if (r) { 1751 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1752 goto error; 1753 } 1754 1755 dc_hardware_init(adev->dm.dc); 1756 1757 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 1758 if (!adev->dm.hpd_rx_offload_wq) { 1759 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 1760 goto error; 1761 } 1762 1763 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 1764 struct dc_phy_addr_space_config pa_config; 1765 1766 mmhub_read_system_context(adev, &pa_config); 1767 1768 // Call the DC init_memory func 1769 dc_setup_system_context(adev->dm.dc, &pa_config); 1770 } 1771 1772 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 1773 if (!adev->dm.freesync_module) { 1774 DRM_ERROR( 1775 "amdgpu: failed to initialize freesync_module.\n"); 1776 } else 1777 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 1778 adev->dm.freesync_module); 1779 1780 amdgpu_dm_init_color_mod(); 1781 1782 if (adev->dm.dc->caps.max_links > 0) { 1783 adev->dm.vblank_control_workqueue = 1784 create_singlethread_workqueue("dm_vblank_control_workqueue"); 1785 if (!adev->dm.vblank_control_workqueue) 1786 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 1787 } 1788 1789 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 1790 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 1791 1792 if (!adev->dm.hdcp_workqueue) 1793 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 1794 else 1795 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 1796 1797 dc_init_callbacks(adev->dm.dc, &init_params); 1798 } 1799 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 1800 init_completion(&adev->dm.dmub_aux_transfer_done); 1801 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 1802 if (!adev->dm.dmub_notify) { 1803 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 1804 goto error; 1805 } 1806 1807 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 1808 if (!adev->dm.delayed_hpd_wq) { 1809 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 1810 goto error; 1811 } 1812 1813 amdgpu_dm_outbox_init(adev); 1814 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 1815 dmub_aux_setconfig_callback, false)) { 1816 DRM_ERROR("amdgpu: fail to register dmub aux callback"); 1817 goto error; 1818 } 1819 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { 1820 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1821 goto error; 1822 } 1823 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { 1824 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1825 goto error; 1826 } 1827 } 1828 1829 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 1830 * It is expected that DMUB will resend any pending notifications at this point, for 1831 * example HPD from DPIA. 
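 * Enabling the outbox any earlier would risk delivering notifications
 * before the callbacks registered above are in place to consume them.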
1832 */ 1833 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 1834 dc_enable_dmub_outbox(adev->dm.dc); 1835 1836 /* DPIA trace goes to dmesg logs only if outbox is enabled */ 1837 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) 1838 dc_dmub_srv_enable_dpia_trace(adev->dm.dc); 1839 } 1840 1841 if (amdgpu_dm_initialize_drm_device(adev)) { 1842 DRM_ERROR( 1843 "amdgpu: failed to initialize sw for display support.\n"); 1844 goto error; 1845 } 1846 1847 /* create fake encoders for MST */ 1848 dm_dp_create_fake_mst_encoders(adev); 1849 1850 /* TODO: Add_display_info? */ 1851 1852 /* TODO use dynamic cursor width */ 1853 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 1854 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 1855 1856 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 1857 DRM_ERROR( 1858 "amdgpu: failed to initialize sw for display support.\n"); 1859 goto error; 1860 } 1861 1862 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1863 adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev); 1864 if (!adev->dm.secure_display_ctxs) 1865 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n"); 1866 #endif 1867 1868 DRM_DEBUG_DRIVER("KMS initialized.\n"); 1869 1870 return 0; 1871 error: 1872 amdgpu_dm_fini(adev); 1873 1874 return -EINVAL; 1875 } 1876 1877 static int amdgpu_dm_early_fini(void *handle) 1878 { 1879 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1880 1881 amdgpu_dm_audio_fini(adev); 1882 1883 return 0; 1884 } 1885 1886 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1887 { 1888 int i; 1889 1890 if (adev->dm.vblank_control_workqueue) { 1891 destroy_workqueue(adev->dm.vblank_control_workqueue); 1892 adev->dm.vblank_control_workqueue = NULL; 1893 } 1894 1895 amdgpu_dm_destroy_drm_device(&adev->dm); 1896 1897 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1898 if (adev->dm.secure_display_ctxs) { 1899 for (i = 0; i < adev->mode_info.num_crtc; i++) { 1900 if (adev->dm.secure_display_ctxs[i].crtc) { 1901 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work); 1902 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work); 1903 } 1904 } 1905 kfree(adev->dm.secure_display_ctxs); 1906 adev->dm.secure_display_ctxs = NULL; 1907 } 1908 #endif 1909 if (adev->dm.hdcp_workqueue) { 1910 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 1911 adev->dm.hdcp_workqueue = NULL; 1912 } 1913 1914 if (adev->dm.dc) 1915 dc_deinit_callbacks(adev->dm.dc); 1916 1917 if (adev->dm.dc) 1918 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 1919 1920 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1921 kfree(adev->dm.dmub_notify); 1922 adev->dm.dmub_notify = NULL; 1923 destroy_workqueue(adev->dm.delayed_hpd_wq); 1924 adev->dm.delayed_hpd_wq = NULL; 1925 } 1926 1927 if (adev->dm.dmub_bo) 1928 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 1929 &adev->dm.dmub_bo_gpu_addr, 1930 &adev->dm.dmub_bo_cpu_addr); 1931 1932 if (adev->dm.hpd_rx_offload_wq) { 1933 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 1934 if (adev->dm.hpd_rx_offload_wq[i].wq) { 1935 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 1936 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 1937 } 1938 } 1939 1940 kfree(adev->dm.hpd_rx_offload_wq); 1941 adev->dm.hpd_rx_offload_wq = NULL; 1942 } 1943 1944 /* DC Destroy TODO: Replace destroy DAL */ 1945 if (adev->dm.dc) 1946 dc_destroy(&adev->dm.dc); 1947 /* 1948 * TODO: pageflip, vlank interrupt 1949 * 1950 * amdgpu_dm_irq_fini(adev); 1951 */ 1952 1953 if 
(adev->dm.cgs_device) { 1954 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 1955 adev->dm.cgs_device = NULL; 1956 } 1957 if (adev->dm.freesync_module) { 1958 mod_freesync_destroy(adev->dm.freesync_module); 1959 adev->dm.freesync_module = NULL; 1960 } 1961 1962 mutex_destroy(&adev->dm.audio_lock); 1963 mutex_destroy(&adev->dm.dc_lock); 1964 mutex_destroy(&adev->dm.dpia_aux_lock); 1965 } 1966 1967 static int load_dmcu_fw(struct amdgpu_device *adev) 1968 { 1969 const char *fw_name_dmcu = NULL; 1970 int r; 1971 const struct dmcu_firmware_header_v1_0 *hdr; 1972 1973 switch (adev->asic_type) { 1974 #if defined(CONFIG_DRM_AMD_DC_SI) 1975 case CHIP_TAHITI: 1976 case CHIP_PITCAIRN: 1977 case CHIP_VERDE: 1978 case CHIP_OLAND: 1979 #endif 1980 case CHIP_BONAIRE: 1981 case CHIP_HAWAII: 1982 case CHIP_KAVERI: 1983 case CHIP_KABINI: 1984 case CHIP_MULLINS: 1985 case CHIP_TONGA: 1986 case CHIP_FIJI: 1987 case CHIP_CARRIZO: 1988 case CHIP_STONEY: 1989 case CHIP_POLARIS11: 1990 case CHIP_POLARIS10: 1991 case CHIP_POLARIS12: 1992 case CHIP_VEGAM: 1993 case CHIP_VEGA10: 1994 case CHIP_VEGA12: 1995 case CHIP_VEGA20: 1996 return 0; 1997 case CHIP_NAVI12: 1998 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 1999 break; 2000 case CHIP_RAVEN: 2001 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 2002 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2003 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 2004 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 2005 else 2006 return 0; 2007 break; 2008 default: 2009 switch (adev->ip_versions[DCE_HWIP][0]) { 2010 case IP_VERSION(2, 0, 2): 2011 case IP_VERSION(2, 0, 3): 2012 case IP_VERSION(2, 0, 0): 2013 case IP_VERSION(2, 1, 0): 2014 case IP_VERSION(3, 0, 0): 2015 case IP_VERSION(3, 0, 2): 2016 case IP_VERSION(3, 0, 3): 2017 case IP_VERSION(3, 0, 1): 2018 case IP_VERSION(3, 1, 2): 2019 case IP_VERSION(3, 1, 3): 2020 case IP_VERSION(3, 1, 4): 2021 case IP_VERSION(3, 1, 5): 2022 case IP_VERSION(3, 1, 6): 2023 case IP_VERSION(3, 2, 0): 2024 case IP_VERSION(3, 2, 1): 2025 return 0; 2026 default: 2027 break; 2028 } 2029 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 2030 return -EINVAL; 2031 } 2032 2033 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2034 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 2035 return 0; 2036 } 2037 2038 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); 2039 if (r == -ENODEV) { 2040 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 2041 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 2042 adev->dm.fw_dmcu = NULL; 2043 return 0; 2044 } 2045 if (r) { 2046 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 2047 fw_name_dmcu); 2048 amdgpu_ucode_release(&adev->dm.fw_dmcu); 2049 return r; 2050 } 2051 2052 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 2053 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 2054 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 2055 adev->firmware.fw_size += 2056 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2057 2058 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 2059 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 2060 adev->firmware.fw_size += 2061 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 2062 2063 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 2064 2065 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 2066 2067 return 0; 2068 } 
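/*
 * For reference, a minimal sketch (not driver code) of the size accounting
 * load_dmcu_fw() performs above: the single DMCU image is registered as two
 * ucode regions, ERAM (the payload minus the interrupt vector table) and
 * INTV (the vector table itself). The helper name is hypothetical and only
 * illustrates the arithmetic.
 */
static inline u32 dmcu_eram_size_bytes(const struct dmcu_firmware_header_v1_0 *hdr)
{
	/* ERAM bytes = total ucode bytes minus the interrupt vector bytes */
	return le32_to_cpu(hdr->header.ucode_size_bytes) -
	       le32_to_cpu(hdr->intv_size_bytes);
}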
2069 2070 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 2071 { 2072 struct amdgpu_device *adev = ctx; 2073 2074 return dm_read_reg(adev->dm.dc->ctx, address); 2075 } 2076 2077 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 2078 uint32_t value) 2079 { 2080 struct amdgpu_device *adev = ctx; 2081 2082 return dm_write_reg(adev->dm.dc->ctx, address, value); 2083 } 2084 2085 static int dm_dmub_sw_init(struct amdgpu_device *adev) 2086 { 2087 struct dmub_srv_create_params create_params; 2088 struct dmub_srv_region_params region_params; 2089 struct dmub_srv_region_info region_info; 2090 struct dmub_srv_memory_params memory_params; 2091 struct dmub_srv_fb_info *fb_info; 2092 struct dmub_srv *dmub_srv; 2093 const struct dmcub_firmware_header_v1_0 *hdr; 2094 enum dmub_asic dmub_asic; 2095 enum dmub_status status; 2096 int r; 2097 2098 switch (adev->ip_versions[DCE_HWIP][0]) { 2099 case IP_VERSION(2, 1, 0): 2100 dmub_asic = DMUB_ASIC_DCN21; 2101 break; 2102 case IP_VERSION(3, 0, 0): 2103 dmub_asic = DMUB_ASIC_DCN30; 2104 break; 2105 case IP_VERSION(3, 0, 1): 2106 dmub_asic = DMUB_ASIC_DCN301; 2107 break; 2108 case IP_VERSION(3, 0, 2): 2109 dmub_asic = DMUB_ASIC_DCN302; 2110 break; 2111 case IP_VERSION(3, 0, 3): 2112 dmub_asic = DMUB_ASIC_DCN303; 2113 break; 2114 case IP_VERSION(3, 1, 2): 2115 case IP_VERSION(3, 1, 3): 2116 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 2117 break; 2118 case IP_VERSION(3, 1, 4): 2119 dmub_asic = DMUB_ASIC_DCN314; 2120 break; 2121 case IP_VERSION(3, 1, 5): 2122 dmub_asic = DMUB_ASIC_DCN315; 2123 break; 2124 case IP_VERSION(3, 1, 6): 2125 dmub_asic = DMUB_ASIC_DCN316; 2126 break; 2127 case IP_VERSION(3, 2, 0): 2128 dmub_asic = DMUB_ASIC_DCN32; 2129 break; 2130 case IP_VERSION(3, 2, 1): 2131 dmub_asic = DMUB_ASIC_DCN321; 2132 break; 2133 default: 2134 /* ASIC doesn't support DMUB. */ 2135 return 0; 2136 } 2137 2138 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 2139 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 2140 2141 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 2142 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 2143 AMDGPU_UCODE_ID_DMCUB; 2144 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 2145 adev->dm.dmub_fw; 2146 adev->firmware.fw_size += 2147 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 2148 2149 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 2150 adev->dm.dmcub_fw_version); 2151 } 2152 2153 2154 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 2155 dmub_srv = adev->dm.dmub_srv; 2156 2157 if (!dmub_srv) { 2158 DRM_ERROR("Failed to allocate DMUB service!\n"); 2159 return -ENOMEM; 2160 } 2161 2162 memset(&create_params, 0, sizeof(create_params)); 2163 create_params.user_ctx = adev; 2164 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2165 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2166 create_params.asic = dmub_asic; 2167 2168 /* Create the DMUB service. */ 2169 status = dmub_srv_create(dmub_srv, &create_params); 2170 if (status != DMUB_STATUS_OK) { 2171 DRM_ERROR("Error creating DMUB service: %d\n", status); 2172 return -EINVAL; 2173 } 2174 2175 /* Calculate the size of all the regions for the DMUB service. 
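 * The firmware blob is laid out as PSP header | instructions/constants |
 * PSP footer | data/bss, which is why PSP_HEADER_BYTES and PSP_FOOTER_BYTES
 * are stripped from inst_const_bytes below. As a made-up example,
 * inst_const_bytes = 0x40000 would yield an instruction region of
 * 0x40000 - 0x100 - 0x100 = 0x3FE00 bytes.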
*/
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;
	region_params.is_mailbox_in_inbox = false;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&memory_params, 0, sizeof(memory_params));
	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
	memory_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	amdgpu_ucode_release(&adev->dm.dmub_fw);
	amdgpu_ucode_release(&adev->dm.fw_dmcu);

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
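				/* MST bring-up failed: fall back to SST and
				 * tear down the topology manager state before
				 * bailing out.
				 */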
2289 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2290 aconnector->dc_link); 2291 break; 2292 } 2293 } 2294 } 2295 drm_connector_list_iter_end(&iter); 2296 2297 return ret; 2298 } 2299 2300 static int dm_late_init(void *handle) 2301 { 2302 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2303 2304 struct dmcu_iram_parameters params; 2305 unsigned int linear_lut[16]; 2306 int i; 2307 struct dmcu *dmcu = NULL; 2308 2309 dmcu = adev->dm.dc->res_pool->dmcu; 2310 2311 for (i = 0; i < 16; i++) 2312 linear_lut[i] = 0xFFFF * i / 15; 2313 2314 params.set = 0; 2315 params.backlight_ramping_override = false; 2316 params.backlight_ramping_start = 0xCCCC; 2317 params.backlight_ramping_reduction = 0xCCCCCCCC; 2318 params.backlight_lut_array_size = 16; 2319 params.backlight_lut_array = linear_lut; 2320 2321 /* Min backlight level after ABM reduction, Don't allow below 1% 2322 * 0xFFFF x 0.01 = 0x28F 2323 */ 2324 params.min_abm_backlight = 0x28F; 2325 /* In the case where abm is implemented on dmcub, 2326 * dmcu object will be null. 2327 * ABM 2.4 and up are implemented on dmcub. 2328 */ 2329 if (dmcu) { 2330 if (!dmcu_load_iram(dmcu, params)) 2331 return -EINVAL; 2332 } else if (adev->dm.dc->ctx->dmub_srv) { 2333 struct dc_link *edp_links[MAX_NUM_EDP]; 2334 int edp_num; 2335 2336 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num); 2337 for (i = 0; i < edp_num; i++) { 2338 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2339 return -EINVAL; 2340 } 2341 } 2342 2343 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2344 } 2345 2346 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr) 2347 { 2348 int ret; 2349 u8 guid[16]; 2350 u64 tmp64; 2351 2352 mutex_lock(&mgr->lock); 2353 if (!mgr->mst_primary) 2354 goto out_fail; 2355 2356 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { 2357 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2358 goto out_fail; 2359 } 2360 2361 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 2362 DP_MST_EN | 2363 DP_UP_REQ_EN | 2364 DP_UPSTREAM_IS_SRC); 2365 if (ret < 0) { 2366 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); 2367 goto out_fail; 2368 } 2369 2370 /* Some hubs forget their guids after they resume */ 2371 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 2372 if (ret != 16) { 2373 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); 2374 goto out_fail; 2375 } 2376 2377 if (memchr_inv(guid, 0, 16) == NULL) { 2378 tmp64 = get_jiffies_64(); 2379 memcpy(&guid[0], &tmp64, sizeof(u64)); 2380 memcpy(&guid[8], &tmp64, sizeof(u64)); 2381 2382 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16); 2383 2384 if (ret != 16) { 2385 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n"); 2386 goto out_fail; 2387 } 2388 } 2389 2390 memcpy(mgr->mst_primary->guid, guid, 16); 2391 2392 out_fail: 2393 mutex_unlock(&mgr->lock); 2394 } 2395 2396 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2397 { 2398 struct amdgpu_dm_connector *aconnector; 2399 struct drm_connector *connector; 2400 struct drm_connector_list_iter iter; 2401 struct drm_dp_mst_topology_mgr *mgr; 2402 2403 drm_connector_list_iter_begin(dev, &iter); 2404 drm_for_each_connector_iter(connector, &iter) { 2405 aconnector = to_amdgpu_dm_connector(connector); 2406 if (aconnector->dc_link->type != dc_connection_mst_branch || 2407 aconnector->mst_root) 2408 continue; 2409 2410 mgr = &aconnector->mst_mgr; 2411 2412 if (suspend) { 2413 
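			/* Quiesce the MST manager before the link goes down. */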
drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			/* if extended timeout is supported in hardware,
			 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
			 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
			 */
			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
			if (!dp_is_lttpr_present(aconnector->dc_link))
				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);

			/* TODO: move resume_mst_branch_status() into drm mst resume again
			 * once topology probing work is pulled out from mst resume into mst
			 * resume 2nd step. mst resume 2nd step should be called after old
			 * state gets restored (i.e. drm_atomic_helper_resume()).
			 */
			resume_mst_branch_status(mgr);
		}
	}
	drm_connector_list_iter_end(&iter);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	int ret = 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver dc implementation.
	 * For Navi1x, clock settings of dcn watermarks are fixed; the
	 * settings should be passed to smu during boot up and resume from s3.
	 * boot up: dc calculates dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct,
	 * then calls the pplib functions below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, clock settings of dcn watermarks are also fixed values.
	 * dc has implemented a different flow for the Windows driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir.
	 */
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(2, 0, 2):
	case IP_VERSION(2, 0, 0):
		break;
	default:
		return 0;
	}

	ret = amdgpu_dpm_write_watermarks_table(adev);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
2495 * 2496 * Some notable things that are initialized here: 2497 * 2498 * - Display Core, both software and hardware 2499 * - DC modules that we need (freesync and color management) 2500 * - DRM software states 2501 * - Interrupt sources and handlers 2502 * - Vblank support 2503 * - Debug FS entries, if enabled 2504 */ 2505 static int dm_hw_init(void *handle) 2506 { 2507 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2508 /* Create DAL display manager */ 2509 amdgpu_dm_init(adev); 2510 amdgpu_dm_hpd_init(adev); 2511 2512 return 0; 2513 } 2514 2515 /** 2516 * dm_hw_fini() - Teardown DC device 2517 * @handle: The base driver device containing the amdgpu_dm device. 2518 * 2519 * Teardown components within &struct amdgpu_display_manager that require 2520 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2521 * were loaded. Also flush IRQ workqueues and disable them. 2522 */ 2523 static int dm_hw_fini(void *handle) 2524 { 2525 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2526 2527 amdgpu_dm_hpd_fini(adev); 2528 2529 amdgpu_dm_irq_fini(adev); 2530 amdgpu_dm_fini(adev); 2531 return 0; 2532 } 2533 2534 2535 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2536 struct dc_state *state, bool enable) 2537 { 2538 enum dc_irq_source irq_source; 2539 struct amdgpu_crtc *acrtc; 2540 int rc = -EBUSY; 2541 int i = 0; 2542 2543 for (i = 0; i < state->stream_count; i++) { 2544 acrtc = get_crtc_by_otg_inst( 2545 adev, state->stream_status[i].primary_otg_inst); 2546 2547 if (acrtc && state->stream_status[i].plane_count != 0) { 2548 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2549 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2550 if (rc) 2551 DRM_WARN("Failed to %s pflip interrupts\n", 2552 enable ? "enable" : "disable"); 2553 2554 if (enable) { 2555 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 2556 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 2557 } else 2558 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 2559 2560 if (rc) 2561 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 2562 2563 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 2564 /* During gpu-reset we disable and then enable vblank irq, so 2565 * don't use amdgpu_irq_get/put() to avoid refcount change. 2566 */ 2567 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 2568 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? 
"en" : "dis"); 2569 } 2570 } 2571 2572 } 2573 2574 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2575 { 2576 struct dc_state *context = NULL; 2577 enum dc_status res = DC_ERROR_UNEXPECTED; 2578 int i; 2579 struct dc_stream_state *del_streams[MAX_PIPES]; 2580 int del_streams_count = 0; 2581 2582 memset(del_streams, 0, sizeof(del_streams)); 2583 2584 context = dc_create_state(dc); 2585 if (context == NULL) 2586 goto context_alloc_fail; 2587 2588 dc_resource_state_copy_construct_current(dc, context); 2589 2590 /* First remove from context all streams */ 2591 for (i = 0; i < context->stream_count; i++) { 2592 struct dc_stream_state *stream = context->streams[i]; 2593 2594 del_streams[del_streams_count++] = stream; 2595 } 2596 2597 /* Remove all planes for removed streams and then remove the streams */ 2598 for (i = 0; i < del_streams_count; i++) { 2599 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2600 res = DC_FAIL_DETACH_SURFACES; 2601 goto fail; 2602 } 2603 2604 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2605 if (res != DC_OK) 2606 goto fail; 2607 } 2608 2609 res = dc_commit_streams(dc, context->streams, context->stream_count); 2610 2611 fail: 2612 dc_release_state(context); 2613 2614 context_alloc_fail: 2615 return res; 2616 } 2617 2618 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2619 { 2620 int i; 2621 2622 if (dm->hpd_rx_offload_wq) { 2623 for (i = 0; i < dm->dc->caps.max_links; i++) 2624 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2625 } 2626 } 2627 2628 static int dm_suspend(void *handle) 2629 { 2630 struct amdgpu_device *adev = handle; 2631 struct amdgpu_display_manager *dm = &adev->dm; 2632 int ret = 0; 2633 2634 if (amdgpu_in_reset(adev)) { 2635 mutex_lock(&dm->dc_lock); 2636 2637 dc_allow_idle_optimizations(adev->dm.dc, false); 2638 2639 dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2640 2641 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2642 2643 amdgpu_dm_commit_zero_streams(dm->dc); 2644 2645 amdgpu_dm_irq_suspend(adev); 2646 2647 hpd_rx_irq_work_suspend(dm); 2648 2649 return ret; 2650 } 2651 2652 WARN_ON(adev->dm.cached_state); 2653 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 2654 2655 s3_handle_mst(adev_to_drm(adev), true); 2656 2657 amdgpu_dm_irq_suspend(adev); 2658 2659 hpd_rx_irq_work_suspend(dm); 2660 2661 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 2662 2663 return 0; 2664 } 2665 2666 struct amdgpu_dm_connector * 2667 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2668 struct drm_crtc *crtc) 2669 { 2670 u32 i; 2671 struct drm_connector_state *new_con_state; 2672 struct drm_connector *connector; 2673 struct drm_crtc *crtc_from_state; 2674 2675 for_each_new_connector_in_state(state, connector, new_con_state, i) { 2676 crtc_from_state = new_con_state->crtc; 2677 2678 if (crtc_from_state == crtc) 2679 return to_amdgpu_dm_connector(connector); 2680 } 2681 2682 return NULL; 2683 } 2684 2685 static void emulated_link_detect(struct dc_link *link) 2686 { 2687 struct dc_sink_init_data sink_init_data = { 0 }; 2688 struct display_sink_capability sink_caps = { 0 }; 2689 enum dc_edid_status edid_status; 2690 struct dc_context *dc_ctx = link->ctx; 2691 struct dc_sink *sink = NULL; 2692 struct dc_sink *prev_sink = NULL; 2693 2694 link->type = dc_connection_none; 2695 prev_sink = link->local_sink; 2696 2697 if (prev_sink) 2698 dc_sink_release(prev_sink); 2699 2700 switch (link->connector_signal) { 2701 
case SIGNAL_TYPE_HDMI_TYPE_A: { 2702 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2703 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 2704 break; 2705 } 2706 2707 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 2708 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2709 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 2710 break; 2711 } 2712 2713 case SIGNAL_TYPE_DVI_DUAL_LINK: { 2714 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2715 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 2716 break; 2717 } 2718 2719 case SIGNAL_TYPE_LVDS: { 2720 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2721 sink_caps.signal = SIGNAL_TYPE_LVDS; 2722 break; 2723 } 2724 2725 case SIGNAL_TYPE_EDP: { 2726 sink_caps.transaction_type = 2727 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2728 sink_caps.signal = SIGNAL_TYPE_EDP; 2729 break; 2730 } 2731 2732 case SIGNAL_TYPE_DISPLAY_PORT: { 2733 sink_caps.transaction_type = 2734 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2735 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 2736 break; 2737 } 2738 2739 default: 2740 DC_ERROR("Invalid connector type! signal:%d\n", 2741 link->connector_signal); 2742 return; 2743 } 2744 2745 sink_init_data.link = link; 2746 sink_init_data.sink_signal = sink_caps.signal; 2747 2748 sink = dc_sink_create(&sink_init_data); 2749 if (!sink) { 2750 DC_ERROR("Failed to create sink!\n"); 2751 return; 2752 } 2753 2754 /* dc_sink_create returns a new reference */ 2755 link->local_sink = sink; 2756 2757 edid_status = dm_helpers_read_local_edid( 2758 link->ctx, 2759 link, 2760 sink); 2761 2762 if (edid_status != EDID_OK) 2763 DC_ERROR("Failed to read EDID"); 2764 2765 } 2766 2767 static void dm_gpureset_commit_state(struct dc_state *dc_state, 2768 struct amdgpu_display_manager *dm) 2769 { 2770 struct { 2771 struct dc_surface_update surface_updates[MAX_SURFACES]; 2772 struct dc_plane_info plane_infos[MAX_SURFACES]; 2773 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 2774 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 2775 struct dc_stream_update stream_update; 2776 } *bundle; 2777 int k, m; 2778 2779 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 2780 2781 if (!bundle) { 2782 dm_error("Failed to allocate update bundle\n"); 2783 goto cleanup; 2784 } 2785 2786 for (k = 0; k < dc_state->stream_count; k++) { 2787 bundle->stream_update.stream = dc_state->streams[k]; 2788 2789 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 2790 bundle->surface_updates[m].surface = 2791 dc_state->stream_status->plane_states[m]; 2792 bundle->surface_updates[m].surface->force_full_update = 2793 true; 2794 } 2795 2796 update_planes_and_stream_adapter(dm->dc, 2797 UPDATE_TYPE_FULL, 2798 dc_state->stream_status->plane_count, 2799 dc_state->streams[k], 2800 &bundle->stream_update, 2801 bundle->surface_updates); 2802 } 2803 2804 cleanup: 2805 kfree(bundle); 2806 } 2807 2808 static int dm_resume(void *handle) 2809 { 2810 struct amdgpu_device *adev = handle; 2811 struct drm_device *ddev = adev_to_drm(adev); 2812 struct amdgpu_display_manager *dm = &adev->dm; 2813 struct amdgpu_dm_connector *aconnector; 2814 struct drm_connector *connector; 2815 struct drm_connector_list_iter iter; 2816 struct drm_crtc *crtc; 2817 struct drm_crtc_state *new_crtc_state; 2818 struct dm_crtc_state *dm_new_crtc_state; 2819 struct drm_plane *plane; 2820 struct drm_plane_state *new_plane_state; 2821 struct dm_plane_state *dm_new_plane_state; 2822 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 2823 enum dc_connection_type new_connection_type = dc_connection_none; 2824 
struct dc_state *dc_state; 2825 int i, r, j, ret; 2826 bool need_hotplug = false; 2827 2828 if (amdgpu_in_reset(adev)) { 2829 dc_state = dm->cached_dc_state; 2830 2831 /* 2832 * The dc->current_state is backed up into dm->cached_dc_state 2833 * before we commit 0 streams. 2834 * 2835 * DC will clear link encoder assignments on the real state 2836 * but the changes won't propagate over to the copy we made 2837 * before the 0 streams commit. 2838 * 2839 * DC expects that link encoder assignments are *not* valid 2840 * when committing a state, so as a workaround we can copy 2841 * off of the current state. 2842 * 2843 * We lose the previous assignments, but we had already 2844 * commit 0 streams anyway. 2845 */ 2846 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 2847 2848 r = dm_dmub_hw_init(adev); 2849 if (r) 2850 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2851 2852 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2853 dc_resume(dm->dc); 2854 2855 amdgpu_dm_irq_resume_early(adev); 2856 2857 for (i = 0; i < dc_state->stream_count; i++) { 2858 dc_state->streams[i]->mode_changed = true; 2859 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 2860 dc_state->stream_status[i].plane_states[j]->update_flags.raw 2861 = 0xffffffff; 2862 } 2863 } 2864 2865 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2866 amdgpu_dm_outbox_init(adev); 2867 dc_enable_dmub_outbox(adev->dm.dc); 2868 } 2869 2870 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); 2871 2872 dm_gpureset_commit_state(dm->cached_dc_state, dm); 2873 2874 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2875 2876 dc_release_state(dm->cached_dc_state); 2877 dm->cached_dc_state = NULL; 2878 2879 amdgpu_dm_irq_resume_late(adev); 2880 2881 mutex_unlock(&dm->dc_lock); 2882 2883 return 0; 2884 } 2885 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 2886 dc_release_state(dm_state->context); 2887 dm_state->context = dc_create_state(dm->dc); 2888 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2889 dc_resource_state_construct(dm->dc, dm_state->context); 2890 2891 /* Before powering on DC we need to re-initialize DMUB. */ 2892 dm_dmub_hw_resume(adev); 2893 2894 /* Re-enable outbox interrupts for DPIA. 
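 * As at init time, the outbox IRQ plumbing (amdgpu_dm_outbox_init) has to
 * be in place before dc_enable_dmub_outbox() is called.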
*/ 2895 if (dc_is_dmub_outbox_supported(adev->dm.dc)) { 2896 amdgpu_dm_outbox_init(adev); 2897 dc_enable_dmub_outbox(adev->dm.dc); 2898 } 2899 2900 /* power on hardware */ 2901 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2902 2903 /* program HPD filter */ 2904 dc_resume(dm->dc); 2905 2906 /* 2907 * early enable HPD Rx IRQ, should be done before set mode as short 2908 * pulse interrupts are used for MST 2909 */ 2910 amdgpu_dm_irq_resume_early(adev); 2911 2912 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 2913 s3_handle_mst(ddev, false); 2914 2915 /* Do detection*/ 2916 drm_connector_list_iter_begin(ddev, &iter); 2917 drm_for_each_connector_iter(connector, &iter) { 2918 aconnector = to_amdgpu_dm_connector(connector); 2919 2920 if (!aconnector->dc_link) 2921 continue; 2922 2923 /* 2924 * this is the case when traversing through already created end sink 2925 * MST connectors, should be skipped 2926 */ 2927 if (aconnector && aconnector->mst_root) 2928 continue; 2929 2930 mutex_lock(&aconnector->hpd_lock); 2931 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 2932 DRM_ERROR("KMS: Failed to detect connector\n"); 2933 2934 if (aconnector->base.force && new_connection_type == dc_connection_none) { 2935 emulated_link_detect(aconnector->dc_link); 2936 } else { 2937 mutex_lock(&dm->dc_lock); 2938 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 2939 mutex_unlock(&dm->dc_lock); 2940 } 2941 2942 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 2943 aconnector->fake_enable = false; 2944 2945 if (aconnector->dc_sink) 2946 dc_sink_release(aconnector->dc_sink); 2947 aconnector->dc_sink = NULL; 2948 amdgpu_dm_update_connector_after_detect(aconnector); 2949 mutex_unlock(&aconnector->hpd_lock); 2950 } 2951 drm_connector_list_iter_end(&iter); 2952 2953 /* Force mode set in atomic commit */ 2954 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 2955 new_crtc_state->active_changed = true; 2956 2957 /* 2958 * atomic_check is expected to create the dc states. We need to release 2959 * them here, since they were duplicated as part of the suspend 2960 * procedure. 
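 * Each reference dropped here should be the last one held; the
 * WARN_ON(kref_read(...) > 1) checks below guard that assumption.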
*/
	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (dm_new_crtc_state->stream) {
			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
			dc_stream_release(dm_new_crtc_state->stream);
			dm_new_crtc_state->stream = NULL;
		}
	}

	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
		dm_new_plane_state = to_dm_plane_state(new_plane_state);
		if (dm_new_plane_state->dc_state) {
			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
			dc_plane_state_release(dm_new_plane_state->dc_state);
			dm_new_plane_state->dc_state = NULL;
		}
	}

	drm_atomic_helper_resume(ddev, dm->cached_state);

	dm->cached_state = NULL;

	/* Do MST topology probing after resuming the cached state */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_root)
			continue;

		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);

		if (ret < 0) {
			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
					aconnector->dc_link);
			need_hotplug = true;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(ddev);

	amdgpu_dm_irq_resume_late(adev);

	amdgpu_dm_smu_write_watermarks_table(adev);

	return 0;
}

/**
 * DOC: DM Lifecycle
 *
 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
 * the base driver's device list to be initialized and torn down accordingly.
 *
 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
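 *
 * For display, those hooks are wired up in &struct amd_ip_funcs
 * amdgpu_dm_funcs below. Note that dm_suspend() and dm_resume() also
 * service GPU reset, taking a separate path when amdgpu_in_reset() is
 * true.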
3020 */ 3021 3022 static const struct amd_ip_funcs amdgpu_dm_funcs = { 3023 .name = "dm", 3024 .early_init = dm_early_init, 3025 .late_init = dm_late_init, 3026 .sw_init = dm_sw_init, 3027 .sw_fini = dm_sw_fini, 3028 .early_fini = amdgpu_dm_early_fini, 3029 .hw_init = dm_hw_init, 3030 .hw_fini = dm_hw_fini, 3031 .suspend = dm_suspend, 3032 .resume = dm_resume, 3033 .is_idle = dm_is_idle, 3034 .wait_for_idle = dm_wait_for_idle, 3035 .check_soft_reset = dm_check_soft_reset, 3036 .soft_reset = dm_soft_reset, 3037 .set_clockgating_state = dm_set_clockgating_state, 3038 .set_powergating_state = dm_set_powergating_state, 3039 }; 3040 3041 const struct amdgpu_ip_block_version dm_ip_block = { 3042 .type = AMD_IP_BLOCK_TYPE_DCE, 3043 .major = 1, 3044 .minor = 0, 3045 .rev = 0, 3046 .funcs = &amdgpu_dm_funcs, 3047 }; 3048 3049 3050 /** 3051 * DOC: atomic 3052 * 3053 * *WIP* 3054 */ 3055 3056 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 3057 .fb_create = amdgpu_display_user_framebuffer_create, 3058 .get_format_info = amdgpu_dm_plane_get_format_info, 3059 .atomic_check = amdgpu_dm_atomic_check, 3060 .atomic_commit = drm_atomic_helper_commit, 3061 }; 3062 3063 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 3064 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, 3065 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, 3066 }; 3067 3068 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 3069 { 3070 struct amdgpu_dm_backlight_caps *caps; 3071 struct drm_connector *conn_base; 3072 struct amdgpu_device *adev; 3073 struct drm_luminance_range_info *luminance_range; 3074 3075 if (aconnector->bl_idx == -1 || 3076 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) 3077 return; 3078 3079 conn_base = &aconnector->base; 3080 adev = drm_to_adev(conn_base->dev); 3081 3082 caps = &adev->dm.backlight_caps[aconnector->bl_idx]; 3083 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 3084 caps->aux_support = false; 3085 3086 if (caps->ext_caps->bits.oled == 1 3087 /* 3088 * || 3089 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 3090 * caps->ext_caps->bits.hdr_aux_backlight_control == 1 3091 */) 3092 caps->aux_support = true; 3093 3094 if (amdgpu_backlight == 0) 3095 caps->aux_support = false; 3096 else if (amdgpu_backlight == 1) 3097 caps->aux_support = true; 3098 3099 luminance_range = &conn_base->display_info.luminance_range; 3100 3101 if (luminance_range->max_luminance) { 3102 caps->aux_min_input_signal = luminance_range->min_luminance; 3103 caps->aux_max_input_signal = luminance_range->max_luminance; 3104 } else { 3105 caps->aux_min_input_signal = 0; 3106 caps->aux_max_input_signal = 512; 3107 } 3108 } 3109 3110 void amdgpu_dm_update_connector_after_detect( 3111 struct amdgpu_dm_connector *aconnector) 3112 { 3113 struct drm_connector *connector = &aconnector->base; 3114 struct drm_device *dev = connector->dev; 3115 struct dc_sink *sink; 3116 3117 /* MST handled by drm_mst framework */ 3118 if (aconnector->mst_mgr.mst_state == true) 3119 return; 3120 3121 sink = aconnector->dc_link->local_sink; 3122 if (sink) 3123 dc_sink_retain(sink); 3124 3125 /* 3126 * Edid mgmt connector gets first update only in mode_valid hook and then 3127 * the connector sink is set to either fake or physical sink depends on link status. 3128 * Skip if already done during boot. 
3129 */ 3130 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 3131 && aconnector->dc_em_sink) { 3132 3133 /* 3134 * For S3 resume with headless use eml_sink to fake stream 3135 * because on resume connector->sink is set to NULL 3136 */ 3137 mutex_lock(&dev->mode_config.mutex); 3138 3139 if (sink) { 3140 if (aconnector->dc_sink) { 3141 amdgpu_dm_update_freesync_caps(connector, NULL); 3142 /* 3143 * retain and release below are used to 3144 * bump up refcount for sink because the link doesn't point 3145 * to it anymore after disconnect, so on next crtc to connector 3146 * reshuffle by UMD we will get into unwanted dc_sink release 3147 */ 3148 dc_sink_release(aconnector->dc_sink); 3149 } 3150 aconnector->dc_sink = sink; 3151 dc_sink_retain(aconnector->dc_sink); 3152 amdgpu_dm_update_freesync_caps(connector, 3153 aconnector->edid); 3154 } else { 3155 amdgpu_dm_update_freesync_caps(connector, NULL); 3156 if (!aconnector->dc_sink) { 3157 aconnector->dc_sink = aconnector->dc_em_sink; 3158 dc_sink_retain(aconnector->dc_sink); 3159 } 3160 } 3161 3162 mutex_unlock(&dev->mode_config.mutex); 3163 3164 if (sink) 3165 dc_sink_release(sink); 3166 return; 3167 } 3168 3169 /* 3170 * TODO: temporary guard to look for proper fix 3171 * if this sink is MST sink, we should not do anything 3172 */ 3173 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3174 dc_sink_release(sink); 3175 return; 3176 } 3177 3178 if (aconnector->dc_sink == sink) { 3179 /* 3180 * We got a DP short pulse (Link Loss, DP CTS, etc...). 3181 * Do nothing!! 3182 */ 3183 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 3184 aconnector->connector_id); 3185 if (sink) 3186 dc_sink_release(sink); 3187 return; 3188 } 3189 3190 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 3191 aconnector->connector_id, aconnector->dc_sink, sink); 3192 3193 mutex_lock(&dev->mode_config.mutex); 3194 3195 /* 3196 * 1. Update status of the drm connector 3197 * 2. Send an event and let userspace tell us what to do 3198 */ 3199 if (sink) { 3200 /* 3201 * TODO: check if we still need the S3 mode update workaround. 3202 * If yes, put it here. 
3203 */ 3204 if (aconnector->dc_sink) { 3205 amdgpu_dm_update_freesync_caps(connector, NULL); 3206 dc_sink_release(aconnector->dc_sink); 3207 } 3208 3209 aconnector->dc_sink = sink; 3210 dc_sink_retain(aconnector->dc_sink); 3211 if (sink->dc_edid.length == 0) { 3212 aconnector->edid = NULL; 3213 if (aconnector->dc_link->aux_mode) { 3214 drm_dp_cec_unset_edid( 3215 &aconnector->dm_dp_aux.aux); 3216 } 3217 } else { 3218 aconnector->edid = 3219 (struct edid *)sink->dc_edid.raw_edid; 3220 3221 if (aconnector->dc_link->aux_mode) 3222 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, 3223 aconnector->edid); 3224 } 3225 3226 if (!aconnector->timing_requested) { 3227 aconnector->timing_requested = 3228 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL); 3229 if (!aconnector->timing_requested) 3230 dm_error("failed to create aconnector->requested_timing\n"); 3231 } 3232 3233 drm_connector_update_edid_property(connector, aconnector->edid); 3234 amdgpu_dm_update_freesync_caps(connector, aconnector->edid); 3235 update_connector_ext_caps(aconnector); 3236 } else { 3237 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3238 amdgpu_dm_update_freesync_caps(connector, NULL); 3239 drm_connector_update_edid_property(connector, NULL); 3240 aconnector->num_modes = 0; 3241 dc_sink_release(aconnector->dc_sink); 3242 aconnector->dc_sink = NULL; 3243 aconnector->edid = NULL; 3244 kfree(aconnector->timing_requested); 3245 aconnector->timing_requested = NULL; 3246 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3247 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3248 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3249 } 3250 3251 mutex_unlock(&dev->mode_config.mutex); 3252 3253 update_subconnector_property(aconnector); 3254 3255 if (sink) 3256 dc_sink_release(sink); 3257 } 3258 3259 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3260 { 3261 struct drm_connector *connector = &aconnector->base; 3262 struct drm_device *dev = connector->dev; 3263 enum dc_connection_type new_connection_type = dc_connection_none; 3264 struct amdgpu_device *adev = drm_to_adev(dev); 3265 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3266 bool ret = false; 3267 3268 if (adev->dm.disable_hpd_irq) 3269 return; 3270 3271 /* 3272 * In case of failure or MST no need to update connector status or notify the OS 3273 * since (for MST case) MST does this in its own context. 
3274 */ 3275 mutex_lock(&aconnector->hpd_lock); 3276 3277 if (adev->dm.hdcp_workqueue) { 3278 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3279 dm_con_state->update_hdcp = true; 3280 } 3281 if (aconnector->fake_enable) 3282 aconnector->fake_enable = false; 3283 3284 aconnector->timing_changed = false; 3285 3286 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type)) 3287 DRM_ERROR("KMS: Failed to detect connector\n"); 3288 3289 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3290 emulated_link_detect(aconnector->dc_link); 3291 3292 drm_modeset_lock_all(dev); 3293 dm_restore_drm_connector_state(dev, connector); 3294 drm_modeset_unlock_all(dev); 3295 3296 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3297 drm_kms_helper_connector_hotplug_event(connector); 3298 } else { 3299 mutex_lock(&adev->dm.dc_lock); 3300 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 3301 mutex_unlock(&adev->dm.dc_lock); 3302 if (ret) { 3303 amdgpu_dm_update_connector_after_detect(aconnector); 3304 3305 drm_modeset_lock_all(dev); 3306 dm_restore_drm_connector_state(dev, connector); 3307 drm_modeset_unlock_all(dev); 3308 3309 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3310 drm_kms_helper_connector_hotplug_event(connector); 3311 } 3312 } 3313 mutex_unlock(&aconnector->hpd_lock); 3314 3315 } 3316 3317 static void handle_hpd_irq(void *param) 3318 { 3319 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3320 3321 handle_hpd_irq_helper(aconnector); 3322 3323 } 3324 3325 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3326 union hpd_irq_data hpd_irq_data) 3327 { 3328 struct hpd_rx_irq_offload_work *offload_work = 3329 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3330 3331 if (!offload_work) { 3332 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3333 return; 3334 } 3335 3336 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3337 offload_work->data = hpd_irq_data; 3338 offload_work->offload_wq = offload_wq; 3339 3340 queue_work(offload_wq->wq, &offload_work->work); 3341 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3342 } 3343 3344 static void handle_hpd_rx_irq(void *param) 3345 { 3346 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3347 struct drm_connector *connector = &aconnector->base; 3348 struct drm_device *dev = connector->dev; 3349 struct dc_link *dc_link = aconnector->dc_link; 3350 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3351 bool result = false; 3352 enum dc_connection_type new_connection_type = dc_connection_none; 3353 struct amdgpu_device *adev = drm_to_adev(dev); 3354 union hpd_irq_data hpd_irq_data; 3355 bool link_loss = false; 3356 bool has_left_work = false; 3357 int idx = dc_link->link_index; 3358 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3359 3360 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3361 3362 if (adev->dm.disable_hpd_irq) 3363 return; 3364 3365 /* 3366 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3367 * conflict, after implement i2c helper, this mutex should be 3368 * retired. 
3369 */ 3370 mutex_lock(&aconnector->hpd_lock); 3371 3372 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3373 &link_loss, true, &has_left_work); 3374 3375 if (!has_left_work) 3376 goto out; 3377 3378 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3379 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3380 goto out; 3381 } 3382 3383 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3384 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3385 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3386 bool skip = false; 3387 3388 /* 3389 * DOWN_REP_MSG_RDY is also handled by polling method 3390 * mgr->cbs->poll_hpd_irq() 3391 */ 3392 spin_lock(&offload_wq->offload_lock); 3393 skip = offload_wq->is_handling_mst_msg_rdy_event; 3394 3395 if (!skip) 3396 offload_wq->is_handling_mst_msg_rdy_event = true; 3397 3398 spin_unlock(&offload_wq->offload_lock); 3399 3400 if (!skip) 3401 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3402 3403 goto out; 3404 } 3405 3406 if (link_loss) { 3407 bool skip = false; 3408 3409 spin_lock(&offload_wq->offload_lock); 3410 skip = offload_wq->is_handling_link_loss; 3411 3412 if (!skip) 3413 offload_wq->is_handling_link_loss = true; 3414 3415 spin_unlock(&offload_wq->offload_lock); 3416 3417 if (!skip) 3418 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3419 3420 goto out; 3421 } 3422 } 3423 3424 out: 3425 if (result && !is_mst_root_connector) { 3426 /* Downstream Port status changed. */ 3427 if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) 3428 DRM_ERROR("KMS: Failed to detect connector\n"); 3429 3430 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3431 emulated_link_detect(dc_link); 3432 3433 if (aconnector->fake_enable) 3434 aconnector->fake_enable = false; 3435 3436 amdgpu_dm_update_connector_after_detect(aconnector); 3437 3438 3439 drm_modeset_lock_all(dev); 3440 dm_restore_drm_connector_state(dev, connector); 3441 drm_modeset_unlock_all(dev); 3442 3443 drm_kms_helper_connector_hotplug_event(connector); 3444 } else { 3445 bool ret = false; 3446 3447 mutex_lock(&adev->dm.dc_lock); 3448 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); 3449 mutex_unlock(&adev->dm.dc_lock); 3450 3451 if (ret) { 3452 if (aconnector->fake_enable) 3453 aconnector->fake_enable = false; 3454 3455 amdgpu_dm_update_connector_after_detect(aconnector); 3456 3457 drm_modeset_lock_all(dev); 3458 dm_restore_drm_connector_state(dev, connector); 3459 drm_modeset_unlock_all(dev); 3460 3461 drm_kms_helper_connector_hotplug_event(connector); 3462 } 3463 } 3464 } 3465 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3466 if (adev->dm.hdcp_workqueue) 3467 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3468 } 3469 3470 if (dc_link->type != dc_connection_mst_branch) 3471 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3472 3473 mutex_unlock(&aconnector->hpd_lock); 3474 } 3475 3476 static void register_hpd_handlers(struct amdgpu_device *adev) 3477 { 3478 struct drm_device *dev = adev_to_drm(adev); 3479 struct drm_connector *connector; 3480 struct amdgpu_dm_connector *aconnector; 3481 const struct dc_link *dc_link; 3482 struct dc_interrupt_params int_params = {0}; 3483 3484 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3485 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3486 3487 list_for_each_entry(connector, 3488 &dev->mode_config.connector_list, head) { 3489 3490 aconnector = to_amdgpu_dm_connector(connector); 3491 dc_link = 
aconnector->dc_link; 3492 3493 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { 3494 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3495 int_params.irq_source = dc_link->irq_source_hpd; 3496 3497 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3498 handle_hpd_irq, 3499 (void *) aconnector); 3500 } 3501 3502 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { 3503 3504 /* Also register for DP short pulse (hpd_rx). */ 3505 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3506 int_params.irq_source = dc_link->irq_source_hpd_rx; 3507 3508 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3509 handle_hpd_rx_irq, 3510 (void *) aconnector); 3511 } 3512 3513 if (adev->dm.hpd_rx_offload_wq) 3514 adev->dm.hpd_rx_offload_wq[connector->index].aconnector = 3515 aconnector; 3516 } 3517 } 3518 3519 #if defined(CONFIG_DRM_AMD_DC_SI) 3520 /* Register IRQ sources and initialize IRQ callbacks */ 3521 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3522 { 3523 struct dc *dc = adev->dm.dc; 3524 struct common_irq_params *c_irq_params; 3525 struct dc_interrupt_params int_params = {0}; 3526 int r; 3527 int i; 3528 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3529 3530 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3531 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3532 3533 /* 3534 * Actions of amdgpu_irq_add_id(): 3535 * 1. Register a set() function with base driver. 3536 * Base driver will call set() function to enable/disable an 3537 * interrupt in DC hardware. 3538 * 2. Register amdgpu_dm_irq_handler(). 3539 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3540 * coming from DC hardware. 3541 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3542 * for acknowledging and handling. 
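 * Each block below therefore pairs one amdgpu_irq_add_id() registration
 * with one amdgpu_dm_irq_register_interrupt() handler for the same
 * interrupt source.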
3543 */ 3544 3545 /* Use VBLANK interrupt */ 3546 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3547 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); 3548 if (r) { 3549 DRM_ERROR("Failed to add crtc irq id!\n"); 3550 return r; 3551 } 3552 3553 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3554 int_params.irq_source = 3555 dc_interrupt_to_irq_source(dc, i + 1, 0); 3556 3557 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3558 3559 c_irq_params->adev = adev; 3560 c_irq_params->irq_src = int_params.irq_source; 3561 3562 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3563 dm_crtc_high_irq, c_irq_params); 3564 } 3565 3566 /* Use GRPH_PFLIP interrupt */ 3567 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3568 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3569 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3570 if (r) { 3571 DRM_ERROR("Failed to add page flip irq id!\n"); 3572 return r; 3573 } 3574 3575 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3576 int_params.irq_source = 3577 dc_interrupt_to_irq_source(dc, i, 0); 3578 3579 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3580 3581 c_irq_params->adev = adev; 3582 c_irq_params->irq_src = int_params.irq_source; 3583 3584 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3585 dm_pflip_high_irq, c_irq_params); 3586 3587 } 3588 3589 /* HPD */ 3590 r = amdgpu_irq_add_id(adev, client_id, 3591 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3592 if (r) { 3593 DRM_ERROR("Failed to add hpd irq id!\n"); 3594 return r; 3595 } 3596 3597 register_hpd_handlers(adev); 3598 3599 return 0; 3600 } 3601 #endif 3602 3603 /* Register IRQ sources and initialize IRQ callbacks */ 3604 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 3605 { 3606 struct dc *dc = adev->dm.dc; 3607 struct common_irq_params *c_irq_params; 3608 struct dc_interrupt_params int_params = {0}; 3609 int r; 3610 int i; 3611 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3612 3613 if (adev->family >= AMDGPU_FAMILY_AI) 3614 client_id = SOC15_IH_CLIENTID_DCE; 3615 3616 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3617 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3618 3619 /* 3620 * Actions of amdgpu_irq_add_id(): 3621 * 1. Register a set() function with base driver. 3622 * Base driver will call set() function to enable/disable an 3623 * interrupt in DC hardware. 3624 * 2. Register amdgpu_dm_irq_handler(). 3625 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3626 * coming from DC hardware. 3627 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3628 * for acknowledging and handling. 
3629 */ 3630 3631 /* Use VBLANK interrupt */ 3632 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 3633 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 3634 if (r) { 3635 DRM_ERROR("Failed to add crtc irq id!\n"); 3636 return r; 3637 } 3638 3639 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3640 int_params.irq_source = 3641 dc_interrupt_to_irq_source(dc, i, 0); 3642 3643 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3644 3645 c_irq_params->adev = adev; 3646 c_irq_params->irq_src = int_params.irq_source; 3647 3648 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3649 dm_crtc_high_irq, c_irq_params); 3650 } 3651 3652 /* Use VUPDATE interrupt */ 3653 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 3654 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 3655 if (r) { 3656 DRM_ERROR("Failed to add vupdate irq id!\n"); 3657 return r; 3658 } 3659 3660 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3661 int_params.irq_source = 3662 dc_interrupt_to_irq_source(dc, i, 0); 3663 3664 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3665 3666 c_irq_params->adev = adev; 3667 c_irq_params->irq_src = int_params.irq_source; 3668 3669 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3670 dm_vupdate_high_irq, c_irq_params); 3671 } 3672 3673 /* Use GRPH_PFLIP interrupt */ 3674 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3675 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3676 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3677 if (r) { 3678 DRM_ERROR("Failed to add page flip irq id!\n"); 3679 return r; 3680 } 3681 3682 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3683 int_params.irq_source = 3684 dc_interrupt_to_irq_source(dc, i, 0); 3685 3686 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3687 3688 c_irq_params->adev = adev; 3689 c_irq_params->irq_src = int_params.irq_source; 3690 3691 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3692 dm_pflip_high_irq, c_irq_params); 3693 3694 } 3695 3696 /* HPD */ 3697 r = amdgpu_irq_add_id(adev, client_id, 3698 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3699 if (r) { 3700 DRM_ERROR("Failed to add hpd irq id!\n"); 3701 return r; 3702 } 3703 3704 register_hpd_handlers(adev); 3705 3706 return 0; 3707 } 3708 3709 /* Register IRQ sources and initialize IRQ callbacks */ 3710 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 3711 { 3712 struct dc *dc = adev->dm.dc; 3713 struct common_irq_params *c_irq_params; 3714 struct dc_interrupt_params int_params = {0}; 3715 int r; 3716 int i; 3717 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3718 static const unsigned int vrtl_int_srcid[] = { 3719 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 3720 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 3721 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 3722 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 3723 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 3724 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 3725 }; 3726 #endif 3727 3728 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3729 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3730 3731 /* 3732 * Actions of amdgpu_irq_add_id(): 3733 * 1. Register a set() function with base driver. 
3734 * Base driver will call set() function to enable/disable an 3735 * interrupt in DC hardware. 3736 * 2. Register amdgpu_dm_irq_handler(). 3737 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3738 * coming from DC hardware. 3739 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3740 * for acknowledging and handling. 3741 */ 3742 3743 /* Use VSTARTUP interrupt */ 3744 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 3745 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 3746 i++) { 3747 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 3748 3749 if (r) { 3750 DRM_ERROR("Failed to add crtc irq id!\n"); 3751 return r; 3752 } 3753 3754 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3755 int_params.irq_source = 3756 dc_interrupt_to_irq_source(dc, i, 0); 3757 3758 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3759 3760 c_irq_params->adev = adev; 3761 c_irq_params->irq_src = int_params.irq_source; 3762 3763 amdgpu_dm_irq_register_interrupt( 3764 adev, &int_params, dm_crtc_high_irq, c_irq_params); 3765 } 3766 3767 /* Use otg vertical line interrupt */ 3768 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3769 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 3770 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 3771 vrtl_int_srcid[i], &adev->vline0_irq); 3772 3773 if (r) { 3774 DRM_ERROR("Failed to add vline0 irq id!\n"); 3775 return r; 3776 } 3777 3778 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3779 int_params.irq_source = 3780 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 3781 3782 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { 3783 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); 3784 break; 3785 } 3786 3787 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 3788 - DC_IRQ_SOURCE_DC1_VLINE0]; 3789 3790 c_irq_params->adev = adev; 3791 c_irq_params->irq_src = int_params.irq_source; 3792 3793 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3794 dm_dcn_vertical_interrupt0_high_irq, c_irq_params); 3795 } 3796 #endif 3797 3798 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 3799 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 3800 * to trigger at end of each vblank, regardless of state of the lock, 3801 * matching DCE behaviour. 
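 *
 * dm_vupdate_high_irq() relies on this, e.g. to run the core vblank
 * handling at end of vblank when variable refresh rate is active.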
3802 */ 3803 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 3804 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 3805 i++) { 3806 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 3807 3808 if (r) { 3809 DRM_ERROR("Failed to add vupdate irq id!\n"); 3810 return r; 3811 } 3812 3813 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3814 int_params.irq_source = 3815 dc_interrupt_to_irq_source(dc, i, 0); 3816 3817 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3818 3819 c_irq_params->adev = adev; 3820 c_irq_params->irq_src = int_params.irq_source; 3821 3822 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3823 dm_vupdate_high_irq, c_irq_params); 3824 } 3825 3826 /* Use GRPH_PFLIP interrupt */ 3827 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 3828 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 3829 i++) { 3830 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 3831 if (r) { 3832 DRM_ERROR("Failed to add page flip irq id!\n"); 3833 return r; 3834 } 3835 3836 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3837 int_params.irq_source = 3838 dc_interrupt_to_irq_source(dc, i, 0); 3839 3840 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3841 3842 c_irq_params->adev = adev; 3843 c_irq_params->irq_src = int_params.irq_source; 3844 3845 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3846 dm_pflip_high_irq, c_irq_params); 3847 3848 } 3849 3850 /* HPD */ 3851 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3852 &adev->hpd_irq); 3853 if (r) { 3854 DRM_ERROR("Failed to add hpd irq id!\n"); 3855 return r; 3856 } 3857 3858 register_hpd_handlers(adev); 3859 3860 return 0; 3861 } 3862 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3863 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3864 { 3865 struct dc *dc = adev->dm.dc; 3866 struct common_irq_params *c_irq_params; 3867 struct dc_interrupt_params int_params = {0}; 3868 int r, i; 3869 3870 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3871 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3872 3873 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3874 &adev->dmub_outbox_irq); 3875 if (r) { 3876 DRM_ERROR("Failed to add outbox irq id!\n"); 3877 return r; 3878 } 3879 3880 if (dc->ctx->dmub_srv) { 3881 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3882 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3883 int_params.irq_source = 3884 dc_interrupt_to_irq_source(dc, i, 0); 3885 3886 c_irq_params = &adev->dm.dmub_outbox_params[0]; 3887 3888 c_irq_params->adev = adev; 3889 c_irq_params->irq_src = int_params.irq_source; 3890 3891 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3892 dm_dmub_outbox1_low_irq, c_irq_params); 3893 } 3894 3895 return 0; 3896 } 3897 3898 /* 3899 * Acquires the lock for the atomic state object and returns 3900 * the new atomic state. 3901 * 3902 * This should only be called during atomic check. 
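 *
 * On the first call this takes the private object's modeset lock and
 * duplicates the current DM state into the atomic state; once *dm_state
 * is populated, subsequent calls return it immediately.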
3903 */ 3904 int dm_atomic_get_state(struct drm_atomic_state *state, 3905 struct dm_atomic_state **dm_state) 3906 { 3907 struct drm_device *dev = state->dev; 3908 struct amdgpu_device *adev = drm_to_adev(dev); 3909 struct amdgpu_display_manager *dm = &adev->dm; 3910 struct drm_private_state *priv_state; 3911 3912 if (*dm_state) 3913 return 0; 3914 3915 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 3916 if (IS_ERR(priv_state)) 3917 return PTR_ERR(priv_state); 3918 3919 *dm_state = to_dm_atomic_state(priv_state); 3920 3921 return 0; 3922 } 3923 3924 static struct dm_atomic_state * 3925 dm_atomic_get_new_state(struct drm_atomic_state *state) 3926 { 3927 struct drm_device *dev = state->dev; 3928 struct amdgpu_device *adev = drm_to_adev(dev); 3929 struct amdgpu_display_manager *dm = &adev->dm; 3930 struct drm_private_obj *obj; 3931 struct drm_private_state *new_obj_state; 3932 int i; 3933 3934 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 3935 if (obj->funcs == dm->atomic_obj.funcs) 3936 return to_dm_atomic_state(new_obj_state); 3937 } 3938 3939 return NULL; 3940 } 3941 3942 static struct drm_private_state * 3943 dm_atomic_duplicate_state(struct drm_private_obj *obj) 3944 { 3945 struct dm_atomic_state *old_state, *new_state; 3946 3947 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 3948 if (!new_state) 3949 return NULL; 3950 3951 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 3952 3953 old_state = to_dm_atomic_state(obj->state); 3954 3955 if (old_state && old_state->context) 3956 new_state->context = dc_copy_state(old_state->context); 3957 3958 if (!new_state->context) { 3959 kfree(new_state); 3960 return NULL; 3961 } 3962 3963 return &new_state->base; 3964 } 3965 3966 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 3967 struct drm_private_state *state) 3968 { 3969 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 3970 3971 if (dm_state && dm_state->context) 3972 dc_release_state(dm_state->context); 3973 3974 kfree(dm_state); 3975 } 3976 3977 static struct drm_private_state_funcs dm_atomic_state_funcs = { 3978 .atomic_duplicate_state = dm_atomic_duplicate_state, 3979 .atomic_destroy_state = dm_atomic_destroy_state, 3980 }; 3981 3982 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 3983 { 3984 struct dm_atomic_state *state; 3985 int r; 3986 3987 adev->mode_info.mode_config_initialized = true; 3988 3989 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 3990 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 3991 3992 adev_to_drm(adev)->mode_config.max_width = 16384; 3993 adev_to_drm(adev)->mode_config.max_height = 16384; 3994 3995 adev_to_drm(adev)->mode_config.preferred_depth = 24; 3996 if (adev->asic_type == CHIP_HAWAII) 3997 /* disable prefer shadow for now due to hibernation issues */ 3998 adev_to_drm(adev)->mode_config.prefer_shadow = 0; 3999 else 4000 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 4001 /* indicates support for immediate flip */ 4002 adev_to_drm(adev)->mode_config.async_page_flip = true; 4003 4004 state = kzalloc(sizeof(*state), GFP_KERNEL); 4005 if (!state) 4006 return -ENOMEM; 4007 4008 state->context = dc_create_state(adev->dm.dc); 4009 if (!state->context) { 4010 kfree(state); 4011 return -ENOMEM; 4012 } 4013 4014 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 4015 4016 drm_atomic_private_obj_init(adev_to_drm(adev), 4017 &adev->dm.atomic_obj, 4018 &state->base, 4019 
&dm_atomic_state_funcs); 4020 4021 r = amdgpu_display_modeset_create_props(adev); 4022 if (r) { 4023 dc_release_state(state->context); 4024 kfree(state); 4025 return r; 4026 } 4027 4028 r = amdgpu_dm_audio_init(adev); 4029 if (r) { 4030 dc_release_state(state->context); 4031 kfree(state); 4032 return r; 4033 } 4034 4035 return 0; 4036 } 4037 4038 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 4039 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 4040 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 4041 4042 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 4043 int bl_idx) 4044 { 4045 #if defined(CONFIG_ACPI) 4046 struct amdgpu_dm_backlight_caps caps; 4047 4048 memset(&caps, 0, sizeof(caps)); 4049 4050 if (dm->backlight_caps[bl_idx].caps_valid) 4051 return; 4052 4053 amdgpu_acpi_get_backlight_caps(&caps); 4054 if (caps.caps_valid) { 4055 dm->backlight_caps[bl_idx].caps_valid = true; 4056 if (caps.aux_support) 4057 return; 4058 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 4059 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 4060 } else { 4061 dm->backlight_caps[bl_idx].min_input_signal = 4062 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4063 dm->backlight_caps[bl_idx].max_input_signal = 4064 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4065 } 4066 #else 4067 if (dm->backlight_caps[bl_idx].aux_support) 4068 return; 4069 4070 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 4071 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 4072 #endif 4073 } 4074 4075 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 4076 unsigned int *min, unsigned int *max) 4077 { 4078 if (!caps) 4079 return 0; 4080 4081 if (caps->aux_support) { 4082 // Firmware limits are in nits, DC API wants millinits. 4083 *max = 1000 * caps->aux_max_input_signal; 4084 *min = 1000 * caps->aux_min_input_signal; 4085 } else { 4086 // Firmware limits are 8-bit, PWM control is 16-bit. 
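		// Scaling by 0x101 (257) maps the range exactly: 255 * 257 = 65535.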
4087 *max = 0x101 * caps->max_input_signal; 4088 *min = 0x101 * caps->min_input_signal; 4089 } 4090 return 1; 4091 } 4092 4093 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 4094 uint32_t brightness) 4095 { 4096 unsigned int min, max; 4097 4098 if (!get_brightness_range(caps, &min, &max)) 4099 return brightness; 4100 4101 // Rescale 0..255 to min..max 4102 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 4103 AMDGPU_MAX_BL_LEVEL); 4104 } 4105 4106 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 4107 uint32_t brightness) 4108 { 4109 unsigned int min, max; 4110 4111 if (!get_brightness_range(caps, &min, &max)) 4112 return brightness; 4113 4114 if (brightness < min) 4115 return 0; 4116 // Rescale min..max to 0..255 4117 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 4118 max - min); 4119 } 4120 4121 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 4122 int bl_idx, 4123 u32 user_brightness) 4124 { 4125 struct amdgpu_dm_backlight_caps caps; 4126 struct dc_link *link; 4127 u32 brightness; 4128 bool rc; 4129 4130 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4131 caps = dm->backlight_caps[bl_idx]; 4132 4133 dm->brightness[bl_idx] = user_brightness; 4134 /* update scratch register */ 4135 if (bl_idx == 0) 4136 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 4137 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 4138 link = (struct dc_link *)dm->backlight_link[bl_idx]; 4139 4140 /* Change brightness based on AUX property */ 4141 if (caps.aux_support) { 4142 rc = dc_link_set_backlight_level_nits(link, true, brightness, 4143 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 4144 if (!rc) 4145 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 4146 } else { 4147 rc = dc_link_set_backlight_level(link, brightness, 0); 4148 if (!rc) 4149 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 4150 } 4151 4152 if (rc) 4153 dm->actual_brightness[bl_idx] = user_brightness; 4154 } 4155 4156 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 4157 { 4158 struct amdgpu_display_manager *dm = bl_get_data(bd); 4159 int i; 4160 4161 for (i = 0; i < dm->num_of_edps; i++) { 4162 if (bd == dm->backlight_dev[i]) 4163 break; 4164 } 4165 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4166 i = 0; 4167 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 4168 4169 return 0; 4170 } 4171 4172 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 4173 int bl_idx) 4174 { 4175 int ret; 4176 struct amdgpu_dm_backlight_caps caps; 4177 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4178 4179 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4180 caps = dm->backlight_caps[bl_idx]; 4181 4182 if (caps.aux_support) { 4183 u32 avg, peak; 4184 bool rc; 4185 4186 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4187 if (!rc) 4188 return dm->brightness[bl_idx]; 4189 return convert_brightness_to_user(&caps, avg); 4190 } 4191 4192 ret = dc_link_get_backlight_level(link); 4193 4194 if (ret == DC_ERROR_UNEXPECTED) 4195 return dm->brightness[bl_idx]; 4196 4197 return convert_brightness_to_user(&caps, ret); 4198 } 4199 4200 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4201 { 4202 struct amdgpu_display_manager *dm = bl_get_data(bd); 4203 int i; 4204 4205 for (i = 0; i < dm->num_of_edps; i++) { 4206 if (bd == dm->backlight_dev[i]) 4207 break; 4208 } 
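	/* If bd was not matched, fall back to the first eDP rather than run past the array. */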
4209 if (i >= AMDGPU_DM_MAX_NUM_EDP) 4210 i = 0; 4211 return amdgpu_dm_backlight_get_level(dm, i); 4212 } 4213 4214 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4215 .options = BL_CORE_SUSPENDRESUME, 4216 .get_brightness = amdgpu_dm_backlight_get_brightness, 4217 .update_status = amdgpu_dm_backlight_update_status, 4218 }; 4219 4220 static void 4221 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) 4222 { 4223 struct drm_device *drm = aconnector->base.dev; 4224 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; 4225 struct backlight_properties props = { 0 }; 4226 char bl_name[16]; 4227 4228 if (aconnector->bl_idx == -1) 4229 return; 4230 4231 if (!acpi_video_backlight_use_native()) { 4232 drm_info(drm, "Skipping amdgpu DM backlight registration\n"); 4233 /* Try registering an ACPI video backlight device instead. */ 4234 acpi_video_register_backlight(); 4235 return; 4236 } 4237 4238 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4239 props.brightness = AMDGPU_MAX_BL_LEVEL; 4240 props.type = BACKLIGHT_RAW; 4241 4242 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4243 drm->primary->index + aconnector->bl_idx); 4244 4245 dm->backlight_dev[aconnector->bl_idx] = 4246 backlight_device_register(bl_name, aconnector->base.kdev, dm, 4247 &amdgpu_dm_backlight_ops, &props); 4248 4249 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { 4250 DRM_ERROR("DM: Backlight registration failed!\n"); 4251 dm->backlight_dev[aconnector->bl_idx] = NULL; 4252 } else 4253 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4254 } 4255 4256 static int initialize_plane(struct amdgpu_display_manager *dm, 4257 struct amdgpu_mode_info *mode_info, int plane_id, 4258 enum drm_plane_type plane_type, 4259 const struct dc_plane_cap *plane_cap) 4260 { 4261 struct drm_plane *plane; 4262 unsigned long possible_crtcs; 4263 int ret = 0; 4264 4265 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4266 if (!plane) { 4267 DRM_ERROR("KMS: Failed to allocate plane\n"); 4268 return -ENOMEM; 4269 } 4270 plane->type = plane_type; 4271 4272 /* 4273 * HACK: IGT tests expect that the primary plane for a CRTC 4274 * can only have one possible CRTC. Only expose support for 4275 * any CRTC if they're not going to be used as a primary plane 4276 * for a CRTC - like overlay or underlay planes. 
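 *
 * For example, the primary plane for CRTC 0 gets possible_crtcs = 0x1,
 * while overlay planes fall through to the 0xff mask below and may be
 * placed on any CRTC.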
 */
	possible_crtcs = 1 << plane_id;
	if (plane_id >= dm->dc->caps.max_streams)
		possible_crtcs = 0xff;

	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

	if (ret) {
		DRM_ERROR("KMS: Failed to initialize plane\n");
		kfree(plane);
		return ret;
	}

	if (mode_info)
		mode_info->planes[plane_id] = plane;

	return ret;
}


static void setup_backlight_device(struct amdgpu_display_manager *dm,
				   struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	int bl_idx = dm->num_of_edps;

	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
	    link->type == dc_connection_none)
		return;

	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
		return;
	}

	aconnector->bl_idx = bl_idx;

	amdgpu_dm_update_backlight_caps(dm, bl_idx);
	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
	dm->backlight_link[bl_idx] = link;
	dm->num_of_edps++;

	update_connector_ext_caps(aconnector);
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector);

/*
 * In this architecture, the association
 * connector -> encoder -> crtc
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	s32 i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	u32 link_cnt;
	s32 primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;
	bool psr_feature_enabled = false;
	int max_overlay = dm->dc->caps.max_slave_planes;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	amdgpu_dm_set_irq_funcs(adev);

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
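 *
 * Note that an enabled overlay consumes an extra DC pipe, so exposing
 * a single one keeps the remaining pipes available.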
4384 */ 4385 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4386 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4387 4388 /* Do not create overlay if MPO disabled */ 4389 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) 4390 break; 4391 4392 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4393 continue; 4394 4395 if (!plane->pixel_format_support.argb8888) 4396 continue; 4397 4398 if (max_overlay-- == 0) 4399 break; 4400 4401 if (initialize_plane(dm, NULL, primary_planes + i, 4402 DRM_PLANE_TYPE_OVERLAY, plane)) { 4403 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4404 goto fail; 4405 } 4406 } 4407 4408 for (i = 0; i < dm->dc->caps.max_streams; i++) 4409 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4410 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4411 goto fail; 4412 } 4413 4414 /* Use Outbox interrupt */ 4415 switch (adev->ip_versions[DCE_HWIP][0]) { 4416 case IP_VERSION(3, 0, 0): 4417 case IP_VERSION(3, 1, 2): 4418 case IP_VERSION(3, 1, 3): 4419 case IP_VERSION(3, 1, 4): 4420 case IP_VERSION(3, 1, 5): 4421 case IP_VERSION(3, 1, 6): 4422 case IP_VERSION(3, 2, 0): 4423 case IP_VERSION(3, 2, 1): 4424 case IP_VERSION(2, 1, 0): 4425 if (register_outbox_irq_handlers(dm->adev)) { 4426 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4427 goto fail; 4428 } 4429 break; 4430 default: 4431 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4432 adev->ip_versions[DCE_HWIP][0]); 4433 } 4434 4435 /* Determine whether to enable PSR support by default. */ 4436 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4437 switch (adev->ip_versions[DCE_HWIP][0]) { 4438 case IP_VERSION(3, 1, 2): 4439 case IP_VERSION(3, 1, 3): 4440 case IP_VERSION(3, 1, 4): 4441 case IP_VERSION(3, 1, 5): 4442 case IP_VERSION(3, 1, 6): 4443 case IP_VERSION(3, 2, 0): 4444 case IP_VERSION(3, 2, 1): 4445 psr_feature_enabled = true; 4446 break; 4447 default: 4448 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 4449 break; 4450 } 4451 } 4452 4453 /* loops over all connectors on the board */ 4454 for (i = 0; i < link_cnt; i++) { 4455 struct dc_link *link = NULL; 4456 4457 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 4458 DRM_ERROR( 4459 "KMS: Cannot support more than %d display indexes\n", 4460 AMDGPU_DM_MAX_DISPLAY_INDEX); 4461 continue; 4462 } 4463 4464 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 4465 if (!aconnector) 4466 goto fail; 4467 4468 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 4469 if (!aencoder) 4470 goto fail; 4471 4472 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 4473 DRM_ERROR("KMS: Failed to initialize encoder\n"); 4474 goto fail; 4475 } 4476 4477 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 4478 DRM_ERROR("KMS: Failed to initialize connector\n"); 4479 goto fail; 4480 } 4481 4482 link = dc_get_link_at_index(dm->dc, i); 4483 4484 if (!dc_link_detect_connection_type(link, &new_connection_type)) 4485 DRM_ERROR("KMS: Failed to detect connector\n"); 4486 4487 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4488 emulated_link_detect(link); 4489 amdgpu_dm_update_connector_after_detect(aconnector); 4490 } else { 4491 bool ret = false; 4492 4493 mutex_lock(&dm->dc_lock); 4494 ret = dc_link_detect(link, DETECT_REASON_BOOT); 4495 mutex_unlock(&dm->dc_lock); 4496 4497 if (ret) { 4498 amdgpu_dm_update_connector_after_detect(aconnector); 4499 setup_backlight_device(dm, aconnector); 4500 4501 if (psr_feature_enabled) 4502 amdgpu_dm_set_psr_caps(link); 4503 4504 /* TODO: Fix vblank control helpers to delay PSR entry to allow this 
when 4505 * PSR is also supported. 4506 */ 4507 if (link->psr_settings.psr_feature_enabled) 4508 adev_to_drm(adev)->vblank_disable_immediate = false; 4509 } 4510 } 4511 amdgpu_set_panel_orientation(&aconnector->base); 4512 } 4513 4514 /* Software is initialized. Now we can register interrupt handlers. */ 4515 switch (adev->asic_type) { 4516 #if defined(CONFIG_DRM_AMD_DC_SI) 4517 case CHIP_TAHITI: 4518 case CHIP_PITCAIRN: 4519 case CHIP_VERDE: 4520 case CHIP_OLAND: 4521 if (dce60_register_irq_handlers(dm->adev)) { 4522 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4523 goto fail; 4524 } 4525 break; 4526 #endif 4527 case CHIP_BONAIRE: 4528 case CHIP_HAWAII: 4529 case CHIP_KAVERI: 4530 case CHIP_KABINI: 4531 case CHIP_MULLINS: 4532 case CHIP_TONGA: 4533 case CHIP_FIJI: 4534 case CHIP_CARRIZO: 4535 case CHIP_STONEY: 4536 case CHIP_POLARIS11: 4537 case CHIP_POLARIS10: 4538 case CHIP_POLARIS12: 4539 case CHIP_VEGAM: 4540 case CHIP_VEGA10: 4541 case CHIP_VEGA12: 4542 case CHIP_VEGA20: 4543 if (dce110_register_irq_handlers(dm->adev)) { 4544 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4545 goto fail; 4546 } 4547 break; 4548 default: 4549 switch (adev->ip_versions[DCE_HWIP][0]) { 4550 case IP_VERSION(1, 0, 0): 4551 case IP_VERSION(1, 0, 1): 4552 case IP_VERSION(2, 0, 2): 4553 case IP_VERSION(2, 0, 3): 4554 case IP_VERSION(2, 0, 0): 4555 case IP_VERSION(2, 1, 0): 4556 case IP_VERSION(3, 0, 0): 4557 case IP_VERSION(3, 0, 2): 4558 case IP_VERSION(3, 0, 3): 4559 case IP_VERSION(3, 0, 1): 4560 case IP_VERSION(3, 1, 2): 4561 case IP_VERSION(3, 1, 3): 4562 case IP_VERSION(3, 1, 4): 4563 case IP_VERSION(3, 1, 5): 4564 case IP_VERSION(3, 1, 6): 4565 case IP_VERSION(3, 2, 0): 4566 case IP_VERSION(3, 2, 1): 4567 if (dcn10_register_irq_handlers(dm->adev)) { 4568 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4569 goto fail; 4570 } 4571 break; 4572 default: 4573 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 4574 adev->ip_versions[DCE_HWIP][0]); 4575 goto fail; 4576 } 4577 break; 4578 } 4579 4580 return 0; 4581 fail: 4582 kfree(aencoder); 4583 kfree(aconnector); 4584 4585 return -EINVAL; 4586 } 4587 4588 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 4589 { 4590 drm_atomic_private_obj_fini(&dm->atomic_obj); 4591 } 4592 4593 /****************************************************************************** 4594 * amdgpu_display_funcs functions 4595 *****************************************************************************/ 4596 4597 /* 4598 * dm_bandwidth_update - program display watermarks 4599 * 4600 * @adev: amdgpu_device pointer 4601 * 4602 * Calculate and program the display watermarks and line buffer allocation. 4603 */ 4604 static void dm_bandwidth_update(struct amdgpu_device *adev) 4605 { 4606 /* TODO: implement later */ 4607 } 4608 4609 static const struct amdgpu_display_funcs dm_display_funcs = { 4610 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 4611 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 4612 .backlight_set_level = NULL, /* never called for DC */ 4613 .backlight_get_level = NULL, /* never called for DC */ 4614 .hpd_sense = NULL,/* called unconditionally */ 4615 .hpd_set_polarity = NULL, /* called unconditionally */ 4616 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 4617 .page_flip_get_scanoutpos = 4618 dm_crtc_get_scanoutpos,/* called unconditionally */ 4619 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 4620 .add_connector = NULL, /* VBIOS parsing. DAL does it. 
*/ 4621 }; 4622 4623 #if defined(CONFIG_DEBUG_KERNEL_DC) 4624 4625 static ssize_t s3_debug_store(struct device *device, 4626 struct device_attribute *attr, 4627 const char *buf, 4628 size_t count) 4629 { 4630 int ret; 4631 int s3_state; 4632 struct drm_device *drm_dev = dev_get_drvdata(device); 4633 struct amdgpu_device *adev = drm_to_adev(drm_dev); 4634 4635 ret = kstrtoint(buf, 0, &s3_state); 4636 4637 if (ret == 0) { 4638 if (s3_state) { 4639 dm_resume(adev); 4640 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 4641 } else 4642 dm_suspend(adev); 4643 } 4644 4645 return ret == 0 ? count : 0; 4646 } 4647 4648 DEVICE_ATTR_WO(s3_debug); 4649 4650 #endif 4651 4652 static int dm_init_microcode(struct amdgpu_device *adev) 4653 { 4654 char *fw_name_dmub; 4655 int r; 4656 4657 switch (adev->ip_versions[DCE_HWIP][0]) { 4658 case IP_VERSION(2, 1, 0): 4659 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 4660 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 4661 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 4662 break; 4663 case IP_VERSION(3, 0, 0): 4664 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) 4665 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 4666 else 4667 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 4668 break; 4669 case IP_VERSION(3, 0, 1): 4670 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 4671 break; 4672 case IP_VERSION(3, 0, 2): 4673 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 4674 break; 4675 case IP_VERSION(3, 0, 3): 4676 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 4677 break; 4678 case IP_VERSION(3, 1, 2): 4679 case IP_VERSION(3, 1, 3): 4680 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 4681 break; 4682 case IP_VERSION(3, 1, 4): 4683 fw_name_dmub = FIRMWARE_DCN_314_DMUB; 4684 break; 4685 case IP_VERSION(3, 1, 5): 4686 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 4687 break; 4688 case IP_VERSION(3, 1, 6): 4689 fw_name_dmub = FIRMWARE_DCN316_DMUB; 4690 break; 4691 case IP_VERSION(3, 2, 0): 4692 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 4693 break; 4694 case IP_VERSION(3, 2, 1): 4695 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 4696 break; 4697 default: 4698 /* ASIC doesn't support DMUB. 
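	 * Returning 0 is deliberate: running without a DMUB service is not
	 * an error on these ASICs.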
*/ 4699 return 0; 4700 } 4701 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); 4702 if (r) 4703 DRM_ERROR("DMUB firmware loading failed: %d\n", r); 4704 return r; 4705 } 4706 4707 static int dm_early_init(void *handle) 4708 { 4709 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4710 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4711 struct atom_context *ctx = mode_info->atom_context; 4712 int index = GetIndexIntoMasterTable(DATA, Object_Header); 4713 u16 data_offset; 4714 4715 /* if there is no object header, skip DM */ 4716 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { 4717 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; 4718 dev_info(adev->dev, "No object header, skipping DM\n"); 4719 return -ENOENT; 4720 } 4721 4722 switch (adev->asic_type) { 4723 #if defined(CONFIG_DRM_AMD_DC_SI) 4724 case CHIP_TAHITI: 4725 case CHIP_PITCAIRN: 4726 case CHIP_VERDE: 4727 adev->mode_info.num_crtc = 6; 4728 adev->mode_info.num_hpd = 6; 4729 adev->mode_info.num_dig = 6; 4730 break; 4731 case CHIP_OLAND: 4732 adev->mode_info.num_crtc = 2; 4733 adev->mode_info.num_hpd = 2; 4734 adev->mode_info.num_dig = 2; 4735 break; 4736 #endif 4737 case CHIP_BONAIRE: 4738 case CHIP_HAWAII: 4739 adev->mode_info.num_crtc = 6; 4740 adev->mode_info.num_hpd = 6; 4741 adev->mode_info.num_dig = 6; 4742 break; 4743 case CHIP_KAVERI: 4744 adev->mode_info.num_crtc = 4; 4745 adev->mode_info.num_hpd = 6; 4746 adev->mode_info.num_dig = 7; 4747 break; 4748 case CHIP_KABINI: 4749 case CHIP_MULLINS: 4750 adev->mode_info.num_crtc = 2; 4751 adev->mode_info.num_hpd = 6; 4752 adev->mode_info.num_dig = 6; 4753 break; 4754 case CHIP_FIJI: 4755 case CHIP_TONGA: 4756 adev->mode_info.num_crtc = 6; 4757 adev->mode_info.num_hpd = 6; 4758 adev->mode_info.num_dig = 7; 4759 break; 4760 case CHIP_CARRIZO: 4761 adev->mode_info.num_crtc = 3; 4762 adev->mode_info.num_hpd = 6; 4763 adev->mode_info.num_dig = 9; 4764 break; 4765 case CHIP_STONEY: 4766 adev->mode_info.num_crtc = 2; 4767 adev->mode_info.num_hpd = 6; 4768 adev->mode_info.num_dig = 9; 4769 break; 4770 case CHIP_POLARIS11: 4771 case CHIP_POLARIS12: 4772 adev->mode_info.num_crtc = 5; 4773 adev->mode_info.num_hpd = 5; 4774 adev->mode_info.num_dig = 5; 4775 break; 4776 case CHIP_POLARIS10: 4777 case CHIP_VEGAM: 4778 adev->mode_info.num_crtc = 6; 4779 adev->mode_info.num_hpd = 6; 4780 adev->mode_info.num_dig = 6; 4781 break; 4782 case CHIP_VEGA10: 4783 case CHIP_VEGA12: 4784 case CHIP_VEGA20: 4785 adev->mode_info.num_crtc = 6; 4786 adev->mode_info.num_hpd = 6; 4787 adev->mode_info.num_dig = 6; 4788 break; 4789 default: 4790 4791 switch (adev->ip_versions[DCE_HWIP][0]) { 4792 case IP_VERSION(2, 0, 2): 4793 case IP_VERSION(3, 0, 0): 4794 adev->mode_info.num_crtc = 6; 4795 adev->mode_info.num_hpd = 6; 4796 adev->mode_info.num_dig = 6; 4797 break; 4798 case IP_VERSION(2, 0, 0): 4799 case IP_VERSION(3, 0, 2): 4800 adev->mode_info.num_crtc = 5; 4801 adev->mode_info.num_hpd = 5; 4802 adev->mode_info.num_dig = 5; 4803 break; 4804 case IP_VERSION(2, 0, 3): 4805 case IP_VERSION(3, 0, 3): 4806 adev->mode_info.num_crtc = 2; 4807 adev->mode_info.num_hpd = 2; 4808 adev->mode_info.num_dig = 2; 4809 break; 4810 case IP_VERSION(1, 0, 0): 4811 case IP_VERSION(1, 0, 1): 4812 case IP_VERSION(3, 0, 1): 4813 case IP_VERSION(2, 1, 0): 4814 case IP_VERSION(3, 1, 2): 4815 case IP_VERSION(3, 1, 3): 4816 case IP_VERSION(3, 1, 4): 4817 case IP_VERSION(3, 1, 5): 4818 case IP_VERSION(3, 1, 6): 4819 case IP_VERSION(3, 2, 0): 4820 case IP_VERSION(3, 2, 1): 
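		/* Remaining DCN 1.0.x, 2.1, 3.0.1, 3.1.x and 3.2.x parts expose four of each. */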
4821 adev->mode_info.num_crtc = 4; 4822 adev->mode_info.num_hpd = 4; 4823 adev->mode_info.num_dig = 4; 4824 break; 4825 default: 4826 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 4827 adev->ip_versions[DCE_HWIP][0]); 4828 return -EINVAL; 4829 } 4830 break; 4831 } 4832 4833 if (adev->mode_info.funcs == NULL) 4834 adev->mode_info.funcs = &dm_display_funcs; 4835 4836 /* 4837 * Note: Do NOT change adev->audio_endpt_rreg and 4838 * adev->audio_endpt_wreg because they are initialised in 4839 * amdgpu_device_init() 4840 */ 4841 #if defined(CONFIG_DEBUG_KERNEL_DC) 4842 device_create_file( 4843 adev_to_drm(adev)->dev, 4844 &dev_attr_s3_debug); 4845 #endif 4846 adev->dc_enabled = true; 4847 4848 return dm_init_microcode(adev); 4849 } 4850 4851 static bool modereset_required(struct drm_crtc_state *crtc_state) 4852 { 4853 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4854 } 4855 4856 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4857 { 4858 drm_encoder_cleanup(encoder); 4859 kfree(encoder); 4860 } 4861 4862 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4863 .destroy = amdgpu_dm_encoder_destroy, 4864 }; 4865 4866 static int 4867 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 4868 const enum surface_pixel_format format, 4869 enum dc_color_space *color_space) 4870 { 4871 bool full_range; 4872 4873 *color_space = COLOR_SPACE_SRGB; 4874 4875 /* DRM color properties only affect non-RGB formats. */ 4876 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 4877 return 0; 4878 4879 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 4880 4881 switch (plane_state->color_encoding) { 4882 case DRM_COLOR_YCBCR_BT601: 4883 if (full_range) 4884 *color_space = COLOR_SPACE_YCBCR601; 4885 else 4886 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 4887 break; 4888 4889 case DRM_COLOR_YCBCR_BT709: 4890 if (full_range) 4891 *color_space = COLOR_SPACE_YCBCR709; 4892 else 4893 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 4894 break; 4895 4896 case DRM_COLOR_YCBCR_BT2020: 4897 if (full_range) 4898 *color_space = COLOR_SPACE_2020_YCBCR; 4899 else 4900 return -EINVAL; 4901 break; 4902 4903 default: 4904 return -EINVAL; 4905 } 4906 4907 return 0; 4908 } 4909 4910 static int 4911 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 4912 const struct drm_plane_state *plane_state, 4913 const u64 tiling_flags, 4914 struct dc_plane_info *plane_info, 4915 struct dc_plane_address *address, 4916 bool tmz_surface, 4917 bool force_disable_dcc) 4918 { 4919 const struct drm_framebuffer *fb = plane_state->fb; 4920 const struct amdgpu_framebuffer *afb = 4921 to_amdgpu_framebuffer(plane_state->fb); 4922 int ret; 4923 4924 memset(plane_info, 0, sizeof(*plane_info)); 4925 4926 switch (fb->format->format) { 4927 case DRM_FORMAT_C8: 4928 plane_info->format = 4929 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 4930 break; 4931 case DRM_FORMAT_RGB565: 4932 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 4933 break; 4934 case DRM_FORMAT_XRGB8888: 4935 case DRM_FORMAT_ARGB8888: 4936 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 4937 break; 4938 case DRM_FORMAT_XRGB2101010: 4939 case DRM_FORMAT_ARGB2101010: 4940 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 4941 break; 4942 case DRM_FORMAT_XBGR2101010: 4943 case DRM_FORMAT_ABGR2101010: 4944 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 4945 break; 4946 case DRM_FORMAT_XBGR8888: 4947 case DRM_FORMAT_ABGR8888: 4948 plane_info->format = 
SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 4949 break; 4950 case DRM_FORMAT_NV21: 4951 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 4952 break; 4953 case DRM_FORMAT_NV12: 4954 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 4955 break; 4956 case DRM_FORMAT_P010: 4957 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 4958 break; 4959 case DRM_FORMAT_XRGB16161616F: 4960 case DRM_FORMAT_ARGB16161616F: 4961 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 4962 break; 4963 case DRM_FORMAT_XBGR16161616F: 4964 case DRM_FORMAT_ABGR16161616F: 4965 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 4966 break; 4967 case DRM_FORMAT_XRGB16161616: 4968 case DRM_FORMAT_ARGB16161616: 4969 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 4970 break; 4971 case DRM_FORMAT_XBGR16161616: 4972 case DRM_FORMAT_ABGR16161616: 4973 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 4974 break; 4975 default: 4976 DRM_ERROR( 4977 "Unsupported screen format %p4cc\n", 4978 &fb->format->format); 4979 return -EINVAL; 4980 } 4981 4982 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 4983 case DRM_MODE_ROTATE_0: 4984 plane_info->rotation = ROTATION_ANGLE_0; 4985 break; 4986 case DRM_MODE_ROTATE_90: 4987 plane_info->rotation = ROTATION_ANGLE_90; 4988 break; 4989 case DRM_MODE_ROTATE_180: 4990 plane_info->rotation = ROTATION_ANGLE_180; 4991 break; 4992 case DRM_MODE_ROTATE_270: 4993 plane_info->rotation = ROTATION_ANGLE_270; 4994 break; 4995 default: 4996 plane_info->rotation = ROTATION_ANGLE_0; 4997 break; 4998 } 4999 5000 5001 plane_info->visible = true; 5002 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5003 5004 plane_info->layer_index = plane_state->normalized_zpos; 5005 5006 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5007 &plane_info->color_space); 5008 if (ret) 5009 return ret; 5010 5011 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format, 5012 plane_info->rotation, tiling_flags, 5013 &plane_info->tiling_info, 5014 &plane_info->plane_size, 5015 &plane_info->dcc, address, 5016 tmz_surface, force_disable_dcc); 5017 if (ret) 5018 return ret; 5019 5020 amdgpu_dm_plane_fill_blending_from_plane_state( 5021 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha, 5022 &plane_info->global_alpha, &plane_info->global_alpha_value); 5023 5024 return 0; 5025 } 5026 5027 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5028 struct dc_plane_state *dc_plane_state, 5029 struct drm_plane_state *plane_state, 5030 struct drm_crtc_state *crtc_state) 5031 { 5032 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5033 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5034 struct dc_scaling_info scaling_info; 5035 struct dc_plane_info plane_info; 5036 int ret; 5037 bool force_disable_dcc = false; 5038 5039 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info); 5040 if (ret) 5041 return ret; 5042 5043 dc_plane_state->src_rect = scaling_info.src_rect; 5044 dc_plane_state->dst_rect = scaling_info.dst_rect; 5045 dc_plane_state->clip_rect = scaling_info.clip_rect; 5046 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5047 5048 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5049 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5050 afb->tiling_flags, 5051 &plane_info, 5052 &dc_plane_state->address, 5053 afb->tmz_surface, 5054 force_disable_dcc); 5055 if 
(ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index;
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

static inline void fill_dc_dirty_rect(struct drm_plane *plane,
				      struct rect *dirty_rect, s32 x,
				      s32 y, s32 width, s32 height,
				      int *i, bool ffu)
{
	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);

	dirty_rect->x = x;
	dirty_rect->y = y;
	dirty_rect->width = width;
	dirty_rect->height = height;

	if (ffu)
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
			plane->base.id, width, height);
	else
		drm_dbg(plane->dev,
			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
			plane->base.id, x, y, width, height);

	(*i)++;
}

/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the
 *	   eDP remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 * @dirty_regions_changed: set to true if the dirty regions have changed
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 * amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 * implicitly provide damage clips without any client support via the plane
 * bounds.
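 *
 * Whenever damage cannot be expressed within DC_MAX_DIRTY_RECTS, or the
 * plane is rotated, the function falls back to a single full-frame
 * update (the "ffu" label below).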
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs,
				bool *dirty_regions_changed)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	u32 num_clips;
	struct drm_mode_rect *clips;
	bool bb_changed;
	bool fb_changed;
	u32 i = 0;

	*dirty_regions_changed = false;

	/*
	 * Cursor plane has its own dirty rect update interface. See
	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return;

	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
		goto ffu;

	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
	clips = drm_plane_get_damage_clips(new_plane_state);

	if (!dm_crtc_state->mpo_requested) {
		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
			goto ffu;

		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
			fill_dc_dirty_rect(new_plane_state->plane,
					   &dirty_rects[flip_addrs->dirty_rect_count],
					   clips->x1, clips->y1,
					   clips->x2 - clips->x1, clips->y2 - clips->y1,
					   &flip_addrs->dirty_rect_count,
					   false);
		return;
	}

	/*
	 * MPO is requested. Add entire plane bounding box to dirty rects if
	 * flipped to or damaged.
	 *
	 * If plane is moved or resized, also add old bounding box to dirty
	 * rects.
	 */
	fb_changed = old_plane_state->fb->base.id !=
		     new_plane_state->fb->base.id;
	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
		      old_plane_state->crtc_h != new_plane_state->crtc_h);

	drm_dbg(plane->dev,
		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
		new_plane_state->plane->base.id,
		bb_changed, fb_changed, num_clips);

	*dirty_regions_changed = bb_changed;

	if ((num_clips + (bb_changed ?
2 : 0)) > DC_MAX_DIRTY_RECTS) 5197 goto ffu; 5198 5199 if (bb_changed) { 5200 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5201 new_plane_state->crtc_x, 5202 new_plane_state->crtc_y, 5203 new_plane_state->crtc_w, 5204 new_plane_state->crtc_h, &i, false); 5205 5206 /* Add old plane bounding-box if plane is moved or resized */ 5207 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5208 old_plane_state->crtc_x, 5209 old_plane_state->crtc_y, 5210 old_plane_state->crtc_w, 5211 old_plane_state->crtc_h, &i, false); 5212 } 5213 5214 if (num_clips) { 5215 for (; i < num_clips; clips++) 5216 fill_dc_dirty_rect(new_plane_state->plane, 5217 &dirty_rects[i], clips->x1, 5218 clips->y1, clips->x2 - clips->x1, 5219 clips->y2 - clips->y1, &i, false); 5220 } else if (fb_changed && !bb_changed) { 5221 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], 5222 new_plane_state->crtc_x, 5223 new_plane_state->crtc_y, 5224 new_plane_state->crtc_w, 5225 new_plane_state->crtc_h, &i, false); 5226 } 5227 5228 flip_addrs->dirty_rect_count = i; 5229 return; 5230 5231 ffu: 5232 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, 5233 dm_crtc_state->base.mode.crtc_hdisplay, 5234 dm_crtc_state->base.mode.crtc_vdisplay, 5235 &flip_addrs->dirty_rect_count, true); 5236 } 5237 5238 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5239 const struct dm_connector_state *dm_state, 5240 struct dc_stream_state *stream) 5241 { 5242 enum amdgpu_rmx_type rmx_type; 5243 5244 struct rect src = { 0 }; /* viewport in composition space*/ 5245 struct rect dst = { 0 }; /* stream addressable area */ 5246 5247 /* no mode. nothing to be done */ 5248 if (!mode) 5249 return; 5250 5251 /* Full screen scaling by default */ 5252 src.width = mode->hdisplay; 5253 src.height = mode->vdisplay; 5254 dst.width = stream->timing.h_addressable; 5255 dst.height = stream->timing.v_addressable; 5256 5257 if (dm_state) { 5258 rmx_type = dm_state->scaling; 5259 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5260 if (src.width * dst.height < 5261 src.height * dst.width) { 5262 /* height needs less upscaling/more downscaling */ 5263 dst.width = src.width * 5264 dst.height / src.height; 5265 } else { 5266 /* width needs less upscaling/more downscaling */ 5267 dst.height = src.height * 5268 dst.width / src.width; 5269 } 5270 } else if (rmx_type == RMX_CENTER) { 5271 dst = src; 5272 } 5273 5274 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5275 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5276 5277 if (dm_state->underscan_enable) { 5278 dst.x += dm_state->underscan_hborder / 2; 5279 dst.y += dm_state->underscan_vborder / 2; 5280 dst.width -= dm_state->underscan_hborder; 5281 dst.height -= dm_state->underscan_vborder; 5282 } 5283 } 5284 5285 stream->src = src; 5286 stream->dst = dst; 5287 5288 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5289 dst.x, dst.y, dst.width, dst.height); 5290 5291 } 5292 5293 static enum dc_color_depth 5294 convert_color_depth_from_display_info(const struct drm_connector *connector, 5295 bool is_y420, int requested_bpc) 5296 { 5297 u8 bpc; 5298 5299 if (is_y420) { 5300 bpc = 8; 5301 5302 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5303 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5304 bpc = 16; 5305 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5306 bpc = 12; 5307 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5308 bpc = 
10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user-requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix edid parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
		       const struct drm_connector_state *connector_state)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (connector_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT601_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR601_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR601;
		break;
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		if (dc_crtc_timing->flags.Y_ONLY)
			color_space = COLOR_SPACE_YCBCR709_LIMITED;
		else
			color_space = COLOR_SPACE_YCBCR709;
		break;
	case DRM_MODE_COLORIMETRY_OPRGB:
		color_space = COLOR_SPACE_ADOBERGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
		else
			color_space = COLOR_SPACE_2020_YCBCR;
		break;
	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
	default:
		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
			color_space = COLOR_SPACE_SRGB;
		/*
		 * 27.03 MHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
		 * respectively.
		 */
		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
		break;
	}

	return color_space;
}

static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires an additional adjustment of 1/2. */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec for the given colour depth. */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	if (is_freesync_video_mode(mode_in, aconnector)) {
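		/*
		 * Freesync video modes take their timing from the unadjusted
		 * mode fields; all other modes use the crtc_* values below.
		 */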
		timing_out->h_addressable = mode_in->hdisplay;
		timing_out->h_total = mode_in->htotal;
		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
		timing_out->v_total = mode_in->vtotal;
		timing_out->v_addressable = mode_in->vdisplay;
		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
		timing_out->pix_clk_100hz = mode_in->clock * 10;
	} else {
		timing_out->h_addressable = mode_in->crtc_hdisplay;
		timing_out->h_total = mode_in->crtc_htotal;
		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
		timing_out->v_total = mode_in->crtc_vtotal;
		timing_out->v_addressable = mode_in->crtc_vdisplay;
		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	}

	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}

	stream->output_color_space = get_output_color_space(timing_out, connector_state);
}

static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
		   native_mode->htotal == drm_mode->htotal &&
		   native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* Neither scaling nor an amdgpu-inserted mode; no need to patch. */
	}
}

static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	struct dc_stream_state *master = NULL;

	if (stream->triggered_crtc_reset.enabled) {
		master = stream->triggered_crtc_reset.event_source;
		stream->triggered_crtc_reset.event =
			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
	}
}
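
/*
 * Illustrative sketch (not used by the driver): how set_master_stream()
 * below derives a refresh rate from a stream's timing. This helper is
 * hypothetical and exists only as an example; e.g. a CEA 1920x1080@60
 * timing gives 1485000 * 100 / (2200 * 1125) = 60.
 */
static inline int __maybe_unused dm_example_timing_refresh_hz(const struct dc_crtc_timing *t)
{
	return (t->pix_clk_100hz * 100) / (t->h_total * t->v_total);
}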

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0; j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0; j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;
	struct dc_stream_state *stream;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag.
		 * For now it's set to false.
		 */
	}

	set_master_stream(context->streams, context->stream_count);

	for (i = 0; i < context->stream_count ; i++) {
		stream = context->streams[i];

		if (!stream)
			continue;

		set_multisync_trigger_params(stream);
	}
}

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96 FPS)
 *
 * The list of standard video formats is not huge, so these modes can be added
 * to the connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch happens seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid a blink during the
 * transition. For example, a video player can change the modesetting from
 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
 * causing any display blink. This same concept can be applied to a mode
 * setting change.
 */
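
/*
 * Illustrative example (not part of the driver logic): the seamless
 * refresh-rate switch described above works by stretching the vertical
 * front porch while the pixel clock and horizontal timing stay fixed.
 * Starting from a CEA 1920x1080@60 base mode (clock 148500 kHz,
 * htotal 2200, vtotal 1125), a 30 Hz variant only needs
 *
 *   vtotal' = 148500 * 1000 / (30 * 2200) = 2250
 *
 * i.e. 2250 - 1125 = 1125 extra lines of front porch; no other timing
 * parameter changes, which is what lets DC skip the full update.
 * add_fs_modes() later in this file generates these variants.
 */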

static struct drm_display_mode *
get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
			      bool use_probed_modes)
{
	struct drm_display_mode *m, *m_pref = NULL;
	u16 current_refresh, highest_refresh;
	struct list_head *list_head = use_probed_modes ?
		&aconnector->base.probed_modes :
		&aconnector->base.modes;

	if (aconnector->freesync_vid_base.clock != 0)
		return &aconnector->freesync_vid_base;

	/* Find the preferred mode */
	list_for_each_entry(m, list_head, head) {
		if (m->type & DRM_MODE_TYPE_PREFERRED) {
			m_pref = m;
			break;
		}
	}

	if (!m_pref) {
		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
		m_pref = list_first_entry_or_null(
			&aconnector->base.modes, struct drm_display_mode, head);
		if (!m_pref) {
			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
			return NULL;
		}
	}

	highest_refresh = drm_mode_vrefresh(m_pref);

	/*
	 * Find the mode with the highest refresh rate at the same resolution.
	 * For some monitors, the preferred mode is not the mode with the
	 * highest supported refresh rate.
	 */
	list_for_each_entry(m, list_head, head) {
		current_refresh = drm_mode_vrefresh(m);

		if (m->hdisplay == m_pref->hdisplay &&
		    m->vdisplay == m_pref->vdisplay &&
		    highest_refresh < current_refresh) {
			highest_refresh = current_refresh;
			m_pref = m;
		}
	}

	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
	return m_pref;
}

static bool is_freesync_video_mode(const struct drm_display_mode *mode,
				   struct amdgpu_dm_connector *aconnector)
{
	struct drm_display_mode *high_mode;
	int timing_diff;

	high_mode = get_highest_refresh_rate_mode(aconnector, false);
	if (!high_mode || !mode)
		return false;

	timing_diff = high_mode->vtotal - mode->vtotal;

	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
	    high_mode->hdisplay != mode->hdisplay ||
	    high_mode->vdisplay != mode->vdisplay ||
	    high_mode->hsync_start != mode->hsync_start ||
	    high_mode->hsync_end != mode->hsync_end ||
	    high_mode->htotal != mode->htotal ||
	    high_mode->hskew != mode->hskew ||
	    high_mode->vscan != mode->vscan ||
	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
	    high_mode->vsync_end - mode->vsync_end != timing_diff)
		return false;
	else
		return true;
}

static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
			    struct dc_sink *sink, struct dc_stream_state *stream,
			    struct dsc_dec_dpcd_caps *dsc_caps)
{
	stream->timing.flags.DSC = 0;
	dsc_caps->is_dsc_supported = false;

	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
		    sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
					      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
					      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
					      dsc_caps);
	}
}


static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
				     struct dc_sink *sink, struct dc_stream_state *stream,
				     struct dsc_dec_dpcd_caps *dsc_caps,
				     uint32_t max_dsc_target_bpp_limit_override)
{
	const struct dc_link_settings *verified_link_cap = NULL;
	u32 link_bw_in_kbps;
	u32 edp_min_bpp_x16, edp_max_bpp_x16;
	struct dc *dc = sink->ctx->dc;
	struct dc_dsc_bw_range bw_range = {0};
	struct dc_dsc_config dsc_cfg = {0};
	struct dc_dsc_config_options dsc_options = {0};

	dc_dsc_get_default_config_option(dc, &dsc_options);
	dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;

	verified_link_cap = dc_link_get_link_cap(stream->link);
	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
	edp_min_bpp_x16 = 8 * 16;
	edp_max_bpp_x16 = 8 * 16;

	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;

	if (edp_max_bpp_x16 < edp_min_bpp_x16)
		edp_min_bpp_x16 = edp_max_bpp_x16;

	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
				dc->debug.dsc_min_slice_height_override,
				edp_min_bpp_x16, edp_max_bpp_x16,
				dsc_caps,
				&stream->timing,
				dc_link_get_highest_encoding_format(aconnector->dc_link),
				&bw_range)) {

		if (bw_range.max_kbps < link_bw_in_kbps) {
			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
					dsc_caps,
					&dsc_options,
					0,
					&stream->timing,
					dc_link_get_highest_encoding_format(aconnector->dc_link),
					&dsc_cfg)) {
				stream->timing.dsc_cfg = dsc_cfg;
				stream->timing.flags.DSC = 1;
				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
			}
			return;
		}
	}

	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
			dsc_caps,
			&dsc_options,
			link_bw_in_kbps,
			&stream->timing,
			dc_link_get_highest_encoding_format(aconnector->dc_link),
			&dsc_cfg)) {
		stream->timing.dsc_cfg = dsc_cfg;
		stream->timing.flags.DSC = 1;
	}
}
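
/*
 * Illustrative numbers (not part of the driver logic): DSC target bpp is
 * carried in 1/16th-of-a-bit units, hence the _x16 suffixes above. As a
 * rough sense of scale for the bandwidth comparisons in these helpers, an
 * uncompressed 3840x2160@60 RGB 8 bpc stream needs about
 * 594000 kHz * 24 bpp = 14,256,000 kbps, while the same timing compressed
 * by DSC to 8 bpp needs about 594000 * 8 = 4,752,000 kbps; comfortably
 * below, e.g., a 4-lane HBR2 link (4 x 5.4 Gbps, ~17,280,000 kbps after
 * 8b/10b coding). The exact figures come from
 * dc_bandwidth_in_kbps_from_timing() and dc_link_bandwidth_kbps().
 */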

static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
					struct dc_sink *sink, struct dc_stream_state *stream,
					struct dsc_dec_dpcd_caps *dsc_caps)
{
	struct drm_connector *drm_connector = &aconnector->base;
	u32 link_bandwidth_kbps;
	struct dc *dc = sink->ctx->dc;
	u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
	u32 dsc_max_supported_bw_in_kbps;
	u32 max_dsc_target_bpp_limit_override =
		drm_connector->display_info.max_dsc_bpp;
	struct dc_dsc_config_options dsc_options = {0};

	dc_dsc_get_default_config_option(dc, &dsc_options);
	dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;

	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
						     dc_link_get_link_cap(aconnector->dc_link));

	/* Set DSC policy according to dsc_clock_en */
	dc_dsc_policy_set_enable_dsc_when_not_needed(
		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
	    !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						&dsc_options,
						link_bandwidth_kbps,
						&stream->timing,
						dc_link_get_highest_encoding_format(aconnector->dc_link),
						&stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
			}
		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
									     dc_link_get_highest_encoding_format(aconnector->dc_link));
			max_supported_bw_in_kbps = link_bandwidth_kbps;
			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
			    max_supported_bw_in_kbps > 0 &&
			    dsc_max_supported_bw_in_kbps > 0)
				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						dsc_caps,
						&dsc_options,
						dsc_max_supported_bw_in_kbps,
						&stream->timing,
						dc_link_get_highest_encoding_format(aconnector->dc_link),
						&stream->timing.dsc_cfg)) {
					stream->timing.flags.DSC = 1;
					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
							 __func__, drm_connector->name);
				}
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}

static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state = &dm_state->base;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode;
	struct drm_display_mode saved_mode;
	struct drm_display_mode *freesync_mode = NULL;
	bool native_mode_found = false;
	bool recalculate_timing = false;
	bool scale = dm_state->scaling != RMX_OFF;
	int mode_refresh;
	int preferred_refresh = 0;
	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
	struct dsc_dec_dpcd_caps dsc_caps;

	struct dc_sink *sink = NULL;

	drm_mode_init(&mode, drm_mode);
	memset(&saved_mode, 0, sizeof(saved_mode));

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in time.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
		if (recalculate_timing) {
			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
			drm_mode_copy(&saved_mode, &mode);
			drm_mode_copy(&mode, freesync_mode);
		} else {
			decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode, scale);

			preferred_refresh = drm_mode_vrefresh(preferred_mode);
		}
	}

	if (recalculate_timing)
		drm_mode_set_crtcinfo(&saved_mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, NULL,
			requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(
			stream, &mode, &aconnector->base, con_state, old_stream,
			requested_bpc);

	if (aconnector->timing_changed) {
		DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
			     __func__,
			     stream->timing.display_color_depth,
			     aconnector->timing_requested->display_color_depth);
		stream->timing = *aconnector->timing_requested;
	}

	/* SST DSC determination policy */
	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
	else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
		 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
		 stream->signal == SIGNAL_TYPE_EDP) {
		//
		// Decide whether the stream supports VSC SDP colorimetry
		// before building the VSC info packet.
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
			tf = TRANSFER_FUNC_GAMMA_22;
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);

		if (stream->link->psr_settings.psr_feature_enabled)
			aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in the context of the HPD irq.
	 * 2. This interface *is called* in the context of a user-mode ioctl,
	 *    which makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON ||
			     aconnector->base.force == DRM_FORCE_ON_DIGITAL);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
			dm_state->abm_level : 0;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

	if (aconnector->bl_idx != -1) {
		backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
		dm->backlight_dev[aconnector->bl_idx] = NULL;
	}

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level ?:
				ABM_LEVEL_IMMEDIATE_DISABLE;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	amdgpu_dm_register_backlight_device(amdgpu_dm_connector);

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dc_link *dc_link = aconnector->dc_link;
	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
	struct edid *edid;

	if (!connector->edid_override)
		return;

	drm_edid_override_connector_update(&aconnector->base);
	edid = aconnector->base.edid_blob_ptr->data;
	aconnector->edid = edid;

	/* Update emulated (virtual) sink's EDID */
	if (dc_em_sink && dc_link) {
		memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
		memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
		dm_helpers_parse_edid_caps(
			dc_link,
			&dc_em_sink->dc_edid,
			&dc_em_sink->edid_caps);
	}
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister,
	.force = amdgpu_dm_connector_funcs_force
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		/*
		 * If connector->edid_override is valid, copy it into
		 * edid_blob_ptr.
		 */

		drm_edid_override_connector_update(&aconnector->base);

		if (!aconnector->base.edid_blob_ptr) {
			DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
				  aconnector->base.name);

			aconnector->base.force = DRM_FORCE_OFF;
			return;
		}
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
			aconnector->dc_link->local_sink :
			aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of headless boot with force on for a DP managed connector,
	 * those settings have to be != 0 to get an initial modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	create_eml_sink(aconnector);
}

static enum dc_status dm_validate_stream_and_context(struct dc *dc,
						     struct dc_stream_state *stream)
{
	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
	struct dc_plane_state *dc_plane_state = NULL;
	struct dc_state *dc_state = NULL;

	if (!stream)
		goto cleanup;

	dc_plane_state = dc_create_plane_state(dc);
	if (!dc_plane_state)
		goto cleanup;

	dc_state = dc_create_state(dc);
	if (!dc_state)
		goto cleanup;

	/* Populate a minimal plane from the stream for validation. */
	dc_plane_state->src_rect.height = stream->src.height;
	dc_plane_state->src_rect.width = stream->src.width;
	dc_plane_state->dst_rect.height = stream->src.height;
	dc_plane_state->dst_rect.width = stream->src.width;
	dc_plane_state->clip_rect.height = stream->src.height;
	dc_plane_state->clip_rect.width = stream->src.width;
	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
	dc_plane_state->plane_size.surface_size.height = stream->src.height;
	dc_plane_state->plane_size.surface_size.width = stream->src.width;
	dc_plane_state->plane_size.chroma_size.height = stream->src.height;
	dc_plane_state->plane_size.chroma_size.width = stream->src.width;
	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
	dc_plane_state->rotation = ROTATION_ANGLE_0;
	dc_plane_state->is_tiling_rotated = false;
	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;

	dc_result = dc_validate_stream(dc, stream);
	if (dc_result == DC_OK)
		dc_result = dc_validate_plane(dc, dc_plane_state);

	if (dc_result == DC_OK)
		dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);

	if (dc_result == DC_OK && !dc_add_plane_to_context(
						dc,
						stream,
						dc_plane_state,
						dc_state))
		dc_result = DC_FAIL_ATTACH_SURFACES;

	if (dc_result == DC_OK)
		dc_result = dc_validate_global_state(dc, dc_state, true);

cleanup:
	if (dc_state)
		dc_release_state(dc_state);

	if (dc_plane_state)
		dc_plane_state_release(dc_plane_state);

	return dc_result;
}

struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;
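
	/*
	 * Illustrative walk-through (not extra logic): with a
	 * max_requested_bpc of 10, the retry loop below attempts validation
	 * at 10, then 8, then 6 bpc before giving up. If validation failed
	 * with DC_FAIL_ENC_VALIDATE, one more pass is attempted further down
	 * with YCbCr 4:2:0 output forced, which can bring e.g. a 4K@60 mode
	 * within HDMI TMDS limits.
	 */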
	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);
		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);

		if (dc_result == DC_OK)
			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");

		aconnector->force_yuv420_output = true;
		stream = create_validate_stream_for_sink(aconnector, drm_mode,
							 dm_state, old_stream);
		aconnector->force_yuv420_output = false;
	}

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
						    struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
	    !aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
	    aconnector->base.force != DRM_FORCE_ON) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	drm_mode_set_crtcinfo(mode, 0);

	stream = create_validate_stream_for_sink(aconnector, mode,
						 to_dm_connector_state(connector->state),
						 NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;
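
	/*
	 * For reference, the buffer packed above is laid out per CTA-861 as:
	 *
	 *   buf[0]      infoframe type (0x87)
	 *   buf[1]      version (0x01)
	 *   buf[2]      payload length (0x1A = 26)
	 *   buf[3]      checksum
	 *   buf[4..29]  static metadata payload (EOTF, primaries, luminance)
	 *
	 * which is why only buf[3] (checksum) and buf[4..] (payload) are
	 * copied into the DC packet below.
	 */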

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
		ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
		if (ret < 0)
			return ret;
	}

	if (!crtc)
		return 0;

	if (new_con_state->colorspace != old_con_state->colorspace) {
		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		new_crtc_state->mode_changed = true;
	}

	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If hotplugging a second, bigger display in FB console mode, the
	 * bigger resolution modes will be filtered by drm_mode_validate_size(),
	 * and those modes are missing after the user starts lightdm. So we
	 * need to renew the mode list in the get_modes callback, not just
	 * return the mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}

int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	struct drm_dp_mst_topology_state *mst_state;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->mst_output_port)
		return 0;

	mst_port = aconnector->mst_output_port;
	mst_mgr = &aconnector->mst_root->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
			  aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
	}

	dm_new_connector_state->vcpi_slots =
		drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
					      dm_new_connector_state->pbn);
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};
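
/*
 * Illustrative sketch (not used by the driver): a simplified version of what
 * drm_dp_calc_pbn_mode(), called from dm_encoder_helper_atomic_check() above,
 * computes for a pixel clock in kHz and a bpp value in 1/16th-bit units,
 * including the DP MST 1.006 overhead margin. This example discards the
 * fractional part of the bpp value; the real helper keeps full 1/16-bit
 * precision. E.g. 3840x2160@60 RGB 8 bpc (594000 kHz, 24 bpp) comes out at
 * roughly 2125 PBN, which dm_update_mst_vcpi_slots_for_dsc() below divides
 * by the link's pbn_div to obtain the number of VCPI time slots.
 */
static inline int __maybe_unused dm_example_calc_pbn(int clock_khz, int bpp_x16)
{
	return DIV_ROUND_UP_ULL(mul_u32_u32(clock_khz * (bpp_x16 >> 4), 64 * 1006),
				54 * 8 * 1000 * 1000);
}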

static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state,
					    struct dsc_mst_fairness_vars *vars)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, ret;
	int vcpi, pbn_div, pbn, slot_num = 0;

	for_each_new_connector_in_state(state, connector, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->mst_output_port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		/* pbn is calculated by compute_mst_dsc_configs_for_state */
		for (j = 0; j < dc_state->stream_count; j++) {
			if (vars[j].aconnector == aconnector) {
				pbn = vars[j].pbn;
				break;
			}
		}

		if (j == dc_state->stream_count)
			continue;

		slot_num = DIV_ROUND_UP(pbn, pbn_div);

		if (stream->timing.flags.DSC != 1) {
			dm_conn_state->pbn = pbn;
			dm_conn_state->vcpi_slots = slot_num;

			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
							   dm_conn_state->pbn, false);
			if (ret < 0)
				return ret;

			continue;
		}

		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}

	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;

}

static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		    (common_modes[i].w == native_mode->hdisplay &&
		     common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
						    common_modes[i].name, common_modes[i].w,
						    common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	const struct drm_display_mode *native_mode;

	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	mutex_lock(&connector->dev->mode_config.mutex);
	amdgpu_dm_connector_get_modes(connector);
	mutex_unlock(&connector->dev->mode_config.mutex);

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	native_mode = &amdgpu_encoder->native_mode;
	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
		return;

	drm_connector_set_panel_orientation_with_quirk(connector,
						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
						       native_mode->hdisplay,
						       native_mode->vdisplay);
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
			drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have more
		 * than one preferred mode, and modes later in the probed
		 * list could be of a higher, preferred resolution. For
		 * example, 3840x2160 in the base EDID preferred timing, and
		 * 4096x2160 preferred in a DID extension block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);

		/*
		 * FreeSync capabilities are reset by calling
		 * drm_add_edid_modes() and need to be
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
			      struct drm_display_mode *mode)
{
	struct drm_display_mode *m;

	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
		if (drm_mode_equal(m, mode))
			return true;
	}

	return false;
}

static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
{
	const struct drm_display_mode *m;
	struct drm_display_mode *new_mode;
	uint i;
	u32 new_modes_count = 0;

	/* Standard FPS values
	 *
	 * 23.976       - TV/NTSC
	 * 24           - Cinema
	 * 25           - TV/PAL
	 * 29.97        - TV/NTSC
	 * 30           - TV/NTSC
	 * 48           - Cinema HFR
	 * 50           - TV/PAL
	 * 60           - Commonly used
	 * 48,72,96,120 - Multiples of 24
	 */
	static const u32 common_rates[] = {
		23976, 24000, 25000, 29970, 30000,
		48000, 50000, 60000, 72000, 96000, 120000
	};
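
	/*
	 * Illustrative example (not extra logic): for a 1920x1080@60 base
	 * mode (clock 148500 kHz, htotal 2200, vtotal 1125), retargeting to
	 * the 48 Hz entry above gives
	 *
	 *   target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406
	 *
	 * so the duplicated mode's vsync_start/vsync_end/vtotal are shifted
	 * by 1406 - 1125 = 281 lines of additional front porch.
	 */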

	/*
	 * Find the mode with the highest refresh rate at the same resolution
	 * as the preferred mode. Some monitors report a preferred mode that
	 * is not the one with the highest supported refresh rate.
	 */

	m = get_highest_refresh_rate_mode(aconnector, true);
	if (!m)
		return 0;

	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
		u64 target_vtotal, target_vtotal_diff;
		u64 num, den;

		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
			continue;

		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
		    common_rates[i] > aconnector->max_vfreq * 1000)
			continue;

		num = (unsigned long long)m->clock * 1000 * 1000;
		den = common_rates[i] * (unsigned long long)m->htotal;
		target_vtotal = div_u64(num, den);
		target_vtotal_diff = target_vtotal - m->vtotal;

		/* Check for illegal modes */
		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
		    m->vtotal + target_vtotal_diff < m->vsync_end)
			continue;

		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
		if (!new_mode)
			goto out;

		new_mode->vtotal += (u16)target_vtotal_diff;
		new_mode->vsync_start += (u16)target_vtotal_diff;
		new_mode->vsync_end += (u16)target_vtotal_diff;
		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
		new_mode->type |= DRM_MODE_TYPE_DRIVER;

		if (!is_duplicate_mode(aconnector, new_mode)) {
			drm_mode_probed_add(&aconnector->base, new_mode);
			new_modes_count += 1;
		} else
			drm_mode_destroy(aconnector->base.dev, new_mode);
	}
out:
	return new_modes_count;
}

static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
						   struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);

	if (!edid)
		return;

	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
		amdgpu_dm_connector->num_modes +=
			add_fs_modes(amdgpu_dm_connector);
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;
	struct dc_link_settings *verified_link_cap =
		&amdgpu_dm_connector->dc_link->verified_link_cap;
	const struct dc *dc = amdgpu_dm_connector->dc_link->dc;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
			drm_add_modes_noedid(connector, 640, 480);
		if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
			amdgpu_dm_connector->num_modes +=
				drm_add_modes_noedid(connector, 1920, 1080);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
		amdgpu_dm_connector_add_freesync_modes(connector, edid);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

static const u32 supported_colorspaces =
	BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
	BIT(DRM_MODE_COLORIMETRY_OPRGB) |
	BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
	BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
require access to state, like bpc. 7320 * Allocate some default initial connector state with our reset helper. 7321 */ 7322 if (aconnector->base.funcs->reset) 7323 aconnector->base.funcs->reset(&aconnector->base); 7324 7325 aconnector->connector_id = link_index; 7326 aconnector->bl_idx = -1; 7327 aconnector->dc_link = link; 7328 aconnector->base.interlace_allowed = false; 7329 aconnector->base.doublescan_allowed = false; 7330 aconnector->base.stereo_allowed = false; 7331 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 7332 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 7333 aconnector->audio_inst = -1; 7334 aconnector->pack_sdp_v1_3 = false; 7335 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE; 7336 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info)); 7337 mutex_init(&aconnector->hpd_lock); 7338 mutex_init(&aconnector->handle_mst_msg_ready); 7339 7340 /* 7341 * Configure HPD hot-plug support: connector->polled defaults to 0, 7342 * which means HPD hot plug is not supported. 7343 */ 7344 switch (connector_type) { 7345 case DRM_MODE_CONNECTOR_HDMIA: 7346 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 7347 aconnector->base.ycbcr_420_allowed = 7348 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 7349 break; 7350 case DRM_MODE_CONNECTOR_DisplayPort: 7351 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 7352 link->link_enc = link_enc_cfg_get_link_enc(link); 7353 ASSERT(link->link_enc); 7354 if (link->link_enc) 7355 aconnector->base.ycbcr_420_allowed = 7356 link->link_enc->features.dp_ycbcr420_supported ? true : false; 7357 break; 7358 case DRM_MODE_CONNECTOR_DVID: 7359 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 7360 break; 7361 default: 7362 break; 7363 } 7364 7365 drm_object_attach_property(&aconnector->base.base, 7366 dm->ddev->mode_config.scaling_mode_property, 7367 DRM_MODE_SCALE_NONE); 7368 7369 drm_object_attach_property(&aconnector->base.base, 7370 adev->mode_info.underscan_property, 7371 UNDERSCAN_OFF); 7372 drm_object_attach_property(&aconnector->base.base, 7373 adev->mode_info.underscan_hborder_property, 7374 0); 7375 drm_object_attach_property(&aconnector->base.base, 7376 adev->mode_info.underscan_vborder_property, 7377 0); 7378 7379 if (!aconnector->mst_root) 7380 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 7381 7382 aconnector->base.state->max_bpc = 16; 7383 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 7384 7385 if (connector_type == DRM_MODE_CONNECTOR_eDP && 7386 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { 7387 drm_object_attach_property(&aconnector->base.base, 7388 adev->mode_info.abm_level_property, 0); 7389 } 7390 7391 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { 7392 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) 7393 drm_connector_attach_colorspace_property(&aconnector->base); 7394 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || 7395 connector_type == DRM_MODE_CONNECTOR_eDP) { 7396 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) 7397 drm_connector_attach_colorspace_property(&aconnector->base); 7398 } 7399 7400 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 7401 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 7402 connector_type == DRM_MODE_CONNECTOR_eDP) { 7403 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 7404 7405 if (!aconnector->mst_root) 7406
drm_connector_attach_vrr_capable_property(&aconnector->base); 7407 7408 if (adev->dm.hdcp_workqueue) 7409 drm_connector_attach_content_protection_property(&aconnector->base, true); 7410 } 7411 } 7412 7413 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 7414 struct i2c_msg *msgs, int num) 7415 { 7416 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 7417 struct ddc_service *ddc_service = i2c->ddc_service; 7418 struct i2c_command cmd; 7419 int i; 7420 int result = -EIO; 7421 7422 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) 7423 return result; 7424 7425 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 7426 7427 if (!cmd.payloads) 7428 return result; 7429 7430 cmd.number_of_payloads = num; 7431 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 7432 cmd.speed = 100; 7433 7434 for (i = 0; i < num; i++) { 7435 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 7436 cmd.payloads[i].address = msgs[i].addr; 7437 cmd.payloads[i].length = msgs[i].len; 7438 cmd.payloads[i].data = msgs[i].buf; 7439 } 7440 7441 if (dc_submit_i2c( 7442 ddc_service->ctx->dc, 7443 ddc_service->link->link_index, 7444 &cmd)) 7445 result = num; 7446 7447 kfree(cmd.payloads); 7448 return result; 7449 } 7450 7451 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 7452 { 7453 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 7454 } 7455 7456 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 7457 .master_xfer = amdgpu_dm_i2c_xfer, 7458 .functionality = amdgpu_dm_i2c_func, 7459 }; 7460 7461 static struct amdgpu_i2c_adapter * 7462 create_i2c(struct ddc_service *ddc_service, 7463 int link_index, 7464 int *res) 7465 { 7466 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 7467 struct amdgpu_i2c_adapter *i2c; 7468 7469 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 7470 if (!i2c) 7471 return NULL; 7472 i2c->base.owner = THIS_MODULE; 7473 i2c->base.class = I2C_CLASS_DDC; 7474 i2c->base.dev.parent = &adev->pdev->dev; 7475 i2c->base.algo = &amdgpu_dm_i2c_algo; 7476 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 7477 i2c_set_adapdata(&i2c->base, i2c); 7478 i2c->ddc_service = ddc_service; 7479 7480 return i2c; 7481 } 7482 7483 7484 /* 7485 * Note: this function assumes that dc_link_detect() was called for the 7486 * dc_link which will be represented by this aconnector. 
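* Detection fills in, among other things, the link's verified_link_cap, which amdgpu_dm_connector_get_modes() above relies on when no valid EDID is available.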
7487 */ 7488 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 7489 struct amdgpu_dm_connector *aconnector, 7490 u32 link_index, 7491 struct amdgpu_encoder *aencoder) 7492 { 7493 int res = 0; 7494 int connector_type; 7495 struct dc *dc = dm->dc; 7496 struct dc_link *link = dc_get_link_at_index(dc, link_index); 7497 struct amdgpu_i2c_adapter *i2c; 7498 7499 link->priv = aconnector; 7500 7501 7502 i2c = create_i2c(link->ddc, link->link_index, &res); 7503 if (!i2c) { 7504 DRM_ERROR("Failed to create i2c adapter data\n"); 7505 return -ENOMEM; 7506 } 7507 7508 aconnector->i2c = i2c; 7509 res = i2c_add_adapter(&i2c->base); 7510 7511 if (res) { 7512 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 7513 goto out_free; 7514 } 7515 7516 connector_type = to_drm_connector_type(link->connector_signal); 7517 7518 res = drm_connector_init_with_ddc( 7519 dm->ddev, 7520 &aconnector->base, 7521 &amdgpu_dm_connector_funcs, 7522 connector_type, 7523 &i2c->base); 7524 7525 if (res) { 7526 DRM_ERROR("connector_init failed\n"); 7527 aconnector->connector_id = -1; 7528 goto out_free; 7529 } 7530 7531 drm_connector_helper_add( 7532 &aconnector->base, 7533 &amdgpu_dm_connector_helper_funcs); 7534 7535 amdgpu_dm_connector_init_helper( 7536 dm, 7537 aconnector, 7538 connector_type, 7539 link, 7540 link_index); 7541 7542 drm_connector_attach_encoder( 7543 &aconnector->base, &aencoder->base); 7544 7545 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 7546 || connector_type == DRM_MODE_CONNECTOR_eDP) 7547 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 7548 7549 out_free: 7550 if (res) { 7551 kfree(i2c); 7552 aconnector->i2c = NULL; 7553 } 7554 return res; 7555 } 7556 7557 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 7558 { 7559 switch (adev->mode_info.num_crtc) { 7560 case 1: 7561 return 0x1; 7562 case 2: 7563 return 0x3; 7564 case 3: 7565 return 0x7; 7566 case 4: 7567 return 0xf; 7568 case 5: 7569 return 0x1f; 7570 case 6: 7571 default: 7572 return 0x3f; 7573 } 7574 } 7575 7576 static int amdgpu_dm_encoder_init(struct drm_device *dev, 7577 struct amdgpu_encoder *aencoder, 7578 uint32_t link_index) 7579 { 7580 struct amdgpu_device *adev = drm_to_adev(dev); 7581 7582 int res = drm_encoder_init(dev, 7583 &aencoder->base, 7584 &amdgpu_dm_encoder_funcs, 7585 DRM_MODE_ENCODER_TMDS, 7586 NULL); 7587 7588 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 7589 7590 if (!res) 7591 aencoder->encoder_id = link_index; 7592 else 7593 aencoder->encoder_id = -1; 7594 7595 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 7596 7597 return res; 7598 } 7599 7600 static void manage_dm_interrupts(struct amdgpu_device *adev, 7601 struct amdgpu_crtc *acrtc, 7602 bool enable) 7603 { 7604 /* 7605 * We have no guarantee that the frontend index maps to the same 7606 * backend index - some even map to more than one. 7607 * 7608 * TODO: Use a different interrupt or check DC itself for the mapping. 
7609 */ 7610 int irq_type = 7611 amdgpu_display_crtc_idx_to_irq_type( 7612 adev, 7613 acrtc->crtc_id); 7614 7615 if (enable) { 7616 drm_crtc_vblank_on(&acrtc->base); 7617 amdgpu_irq_get( 7618 adev, 7619 &adev->pageflip_irq, 7620 irq_type); 7621 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 7622 amdgpu_irq_get( 7623 adev, 7624 &adev->vline0_irq, 7625 irq_type); 7626 #endif 7627 } else { 7628 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 7629 amdgpu_irq_put( 7630 adev, 7631 &adev->vline0_irq, 7632 irq_type); 7633 #endif 7634 amdgpu_irq_put( 7635 adev, 7636 &adev->pageflip_irq, 7637 irq_type); 7638 drm_crtc_vblank_off(&acrtc->base); 7639 } 7640 } 7641 7642 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 7643 struct amdgpu_crtc *acrtc) 7644 { 7645 int irq_type = 7646 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 7647 7648 /* 7649 * This reads the current state for the IRQ and forcibly reapplies 7650 * the setting to hardware. 7651 */ 7652 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 7653 } 7654 7655 static bool 7656 is_scaling_state_different(const struct dm_connector_state *dm_state, 7657 const struct dm_connector_state *old_dm_state) 7658 { 7659 if (dm_state->scaling != old_dm_state->scaling) 7660 return true; 7661 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 7662 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 7663 return true; 7664 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 7665 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 7666 return true; 7667 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 7668 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 7669 return true; 7670 return false; 7671 } 7672 7673 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, 7674 struct drm_crtc_state *old_crtc_state, 7675 struct drm_connector_state *new_conn_state, 7676 struct drm_connector_state *old_conn_state, 7677 const struct drm_connector *connector, 7678 struct hdcp_workqueue *hdcp_w) 7679 { 7680 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7681 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 7682 7683 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 7684 connector->index, connector->status, connector->dpms); 7685 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 7686 old_conn_state->content_protection, new_conn_state->content_protection); 7687 7688 if (old_crtc_state) 7689 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7690 old_crtc_state->enable, 7691 old_crtc_state->active, 7692 old_crtc_state->mode_changed, 7693 old_crtc_state->active_changed, 7694 old_crtc_state->connectors_changed); 7695 7696 if (new_crtc_state) 7697 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 7698 new_crtc_state->enable, 7699 new_crtc_state->active, 7700 new_crtc_state->mode_changed, 7701 new_crtc_state->active_changed, 7702 new_crtc_state->connectors_changed); 7703 7704 /* hdcp content type change */ 7705 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && 7706 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 7707 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7708 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); 7709 return true; 7710 } 7711 7712 /* CP is being re-
enabled, ignore this */ 7713 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 7714 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7715 if (new_crtc_state && new_crtc_state->mode_changed) { 7716 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7717 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); 7718 return true; 7719 } 7720 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 7721 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); 7722 return false; 7723 } 7724 7725 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 7726 * 7727 * Handles: UNDESIRED -> ENABLED 7728 */ 7729 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 7730 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 7731 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 7732 7733 /* Stream removed and re-enabled 7734 * 7735 * Can sometimes overlap with the HPD case, 7736 * thus set update_hdcp to false to avoid 7737 * setting HDCP multiple times. 7738 * 7739 * Handles: DESIRED -> DESIRED (Special case) 7740 */ 7741 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && 7742 new_conn_state->crtc && new_conn_state->crtc->enabled && 7743 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7744 dm_con_state->update_hdcp = false; 7745 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", 7746 __func__); 7747 return true; 7748 } 7749 7750 /* Hot-plug, headless s3, dpms 7751 * 7752 * Only start HDCP if the display is connected/enabled. 7753 * update_hdcp flag will be set to false until the next 7754 * HPD comes in. 
7755 * 7756 * Handles: DESIRED -> DESIRED (Special case) 7757 */ 7758 if (dm_con_state->update_hdcp && 7759 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 7760 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 7761 dm_con_state->update_hdcp = false; 7762 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", 7763 __func__); 7764 return true; 7765 } 7766 7767 if (old_conn_state->content_protection == new_conn_state->content_protection) { 7768 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { 7769 if (new_crtc_state && new_crtc_state->mode_changed) { 7770 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", 7771 __func__); 7772 return true; 7773 } 7774 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", 7775 __func__); 7776 return false; 7777 } 7778 7779 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); 7780 return false; 7781 } 7782 7783 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { 7784 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", 7785 __func__); 7786 return true; 7787 } 7788 7789 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); 7790 return false; 7791 } 7792 7793 static void remove_stream(struct amdgpu_device *adev, 7794 struct amdgpu_crtc *acrtc, 7795 struct dc_stream_state *stream) 7796 { 7797 /* this is the update mode case */ 7798 7799 acrtc->otg_inst = -1; 7800 acrtc->enabled = false; 7801 } 7802 7803 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 7804 { 7805 7806 assert_spin_locked(&acrtc->base.dev->event_lock); 7807 WARN_ON(acrtc->event); 7808 7809 acrtc->event = acrtc->base.state->event; 7810 7811 /* Set the flip status */ 7812 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 7813 7814 /* Mark this event as consumed */ 7815 acrtc->base.state->event = NULL; 7816 7817 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 7818 acrtc->crtc_id); 7819 } 7820 7821 static void update_freesync_state_on_stream( 7822 struct amdgpu_display_manager *dm, 7823 struct dm_crtc_state *new_crtc_state, 7824 struct dc_stream_state *new_stream, 7825 struct dc_plane_state *surface, 7826 u32 flip_timestamp_in_us) 7827 { 7828 struct mod_vrr_params vrr_params; 7829 struct dc_info_packet vrr_infopacket = {0}; 7830 struct amdgpu_device *adev = dm->adev; 7831 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 7832 unsigned long flags; 7833 bool pack_sdp_v1_3 = false; 7834 struct amdgpu_dm_connector *aconn; 7835 enum vrr_packet_type packet_type = PACKET_TYPE_VRR; 7836 7837 if (!new_stream) 7838 return; 7839 7840 /* 7841 * TODO: Determine why min/max totals and vrefresh can be 0 here. 7842 * For now it's sufficient to just guard against these conditions. 7843 */ 7844 7845 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 7846 return; 7847 7848 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 7849 vrr_params = acrtc->dm_irq_params.vrr_params; 7850 7851 if (surface) { 7852 mod_freesync_handle_preflip( 7853 dm->freesync_module, 7854 surface, 7855 new_stream, 7856 flip_timestamp_in_us, 7857 &vrr_params); 7858 7859 if (adev->family < AMDGPU_FAMILY_AI && 7860 amdgpu_dm_crtc_vrr_active(new_crtc_state)) { 7861 mod_freesync_handle_v_update(dm->freesync_module, 7862 new_stream, &vrr_params); 7863 7864 /* Need to call this before the frame ends. 
*/ 7865 dc_stream_adjust_vmin_vmax(dm->dc, 7866 new_crtc_state->stream, 7867 &vrr_params.adjust); 7868 } 7869 } 7870 7871 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; 7872 7873 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { 7874 pack_sdp_v1_3 = aconn->pack_sdp_v1_3; 7875 7876 if (aconn->vsdb_info.amd_vsdb_version == 1) 7877 packet_type = PACKET_TYPE_FS_V1; 7878 else if (aconn->vsdb_info.amd_vsdb_version == 2) 7879 packet_type = PACKET_TYPE_FS_V2; 7880 else if (aconn->vsdb_info.amd_vsdb_version == 3) 7881 packet_type = PACKET_TYPE_FS_V3; 7882 7883 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, 7884 &new_stream->adaptive_sync_infopacket); 7885 } 7886 7887 mod_freesync_build_vrr_infopacket( 7888 dm->freesync_module, 7889 new_stream, 7890 &vrr_params, 7891 packet_type, 7892 TRANSFER_FUNC_UNKNOWN, 7893 &vrr_infopacket, 7894 pack_sdp_v1_3); 7895 7896 new_crtc_state->freesync_vrr_info_changed |= 7897 (memcmp(&new_crtc_state->vrr_infopacket, 7898 &vrr_infopacket, 7899 sizeof(vrr_infopacket)) != 0); 7900 7901 acrtc->dm_irq_params.vrr_params = vrr_params; 7902 new_crtc_state->vrr_infopacket = vrr_infopacket; 7903 7904 new_stream->vrr_infopacket = vrr_infopacket; 7905 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); 7906 7907 if (new_crtc_state->freesync_vrr_info_changed) 7908 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 7909 new_crtc_state->base.crtc->base.id, 7910 (int)new_crtc_state->base.vrr_enabled, 7911 (int)vrr_params.state); 7912 7913 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 7914 } 7915 7916 static void update_stream_irq_parameters( 7917 struct amdgpu_display_manager *dm, 7918 struct dm_crtc_state *new_crtc_state) 7919 { 7920 struct dc_stream_state *new_stream = new_crtc_state->stream; 7921 struct mod_vrr_params vrr_params; 7922 struct mod_freesync_config config = new_crtc_state->freesync_config; 7923 struct amdgpu_device *adev = dm->adev; 7924 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 7925 unsigned long flags; 7926 7927 if (!new_stream) 7928 return; 7929 7930 /* 7931 * TODO: Determine why min/max totals and vrefresh can be 0 here. 7932 * For now it's sufficient to just guard against these conditions. 7933 */ 7934 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 7935 return; 7936 7937 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 7938 vrr_params = acrtc->dm_irq_params.vrr_params; 7939 7940 if (new_crtc_state->vrr_supported && 7941 config.min_refresh_in_uhz && 7942 config.max_refresh_in_uhz) { 7943 /* 7944 * if freesync compatible mode was set, config.state will be set 7945 * in atomic check 7946 */ 7947 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 7948 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 7949 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 7950 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 7951 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 7952 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 7953 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 7954 } else { 7955 config.state = new_crtc_state->base.vrr_enabled ? 
7956 VRR_STATE_ACTIVE_VARIABLE : 7957 VRR_STATE_INACTIVE; 7958 } 7959 } else { 7960 config.state = VRR_STATE_UNSUPPORTED; 7961 } 7962 7963 mod_freesync_build_vrr_params(dm->freesync_module, 7964 new_stream, 7965 &config, &vrr_params); 7966 7967 new_crtc_state->freesync_config = config; 7968 /* Copy state for access from DM IRQ handler */ 7969 acrtc->dm_irq_params.freesync_config = config; 7970 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 7971 acrtc->dm_irq_params.vrr_params = vrr_params; 7972 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 7973 } 7974 7975 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 7976 struct dm_crtc_state *new_state) 7977 { 7978 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); 7979 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); 7980 7981 if (!old_vrr_active && new_vrr_active) { 7982 /* Transition VRR inactive -> active: 7983 * While VRR is active, we must not disable vblank irq, as a 7984 * reenable after disable would compute bogus vblank/pflip 7985 * timestamps if it likely happened inside display front-porch. 7986 * 7987 * We also need vupdate irq for the actual core vblank handling 7988 * at end of vblank. 7989 */ 7990 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); 7991 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); 7992 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 7993 __func__, new_state->base.crtc->base.id); 7994 } else if (old_vrr_active && !new_vrr_active) { 7995 /* Transition VRR active -> inactive: 7996 * Allow vblank irq disable again for fixed refresh rate. 7997 */ 7998 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); 7999 drm_crtc_vblank_put(new_state->base.crtc); 8000 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8001 __func__, new_state->base.crtc->base.id); 8002 } 8003 } 8004 8005 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8006 { 8007 struct drm_plane *plane; 8008 struct drm_plane_state *old_plane_state; 8009 int i; 8010 8011 /* 8012 * TODO: Make this per-stream so we don't issue redundant updates for 8013 * commits with multiple streams. 8014 */ 8015 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8016 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8017 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); 8018 } 8019 8020 static inline uint32_t get_mem_type(struct drm_framebuffer *fb) 8021 { 8022 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); 8023 8024 return abo->tbo.resource ? 
abo->tbo.resource->mem_type : 0; 8025 } 8026 8027 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8028 struct drm_device *dev, 8029 struct amdgpu_display_manager *dm, 8030 struct drm_crtc *pcrtc, 8031 bool wait_for_vblank) 8032 { 8033 u32 i; 8034 u64 timestamp_ns = ktime_get_ns(); 8035 struct drm_plane *plane; 8036 struct drm_plane_state *old_plane_state, *new_plane_state; 8037 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8038 struct drm_crtc_state *new_pcrtc_state = 8039 drm_atomic_get_new_crtc_state(state, pcrtc); 8040 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8041 struct dm_crtc_state *dm_old_crtc_state = 8042 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8043 int planes_count = 0, vpos, hpos; 8044 unsigned long flags; 8045 u32 target_vblank, last_flip_vblank; 8046 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); 8047 bool cursor_update = false; 8048 bool pflip_present = false; 8049 bool dirty_rects_changed = false; 8050 struct { 8051 struct dc_surface_update surface_updates[MAX_SURFACES]; 8052 struct dc_plane_info plane_infos[MAX_SURFACES]; 8053 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8054 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8055 struct dc_stream_update stream_update; 8056 } *bundle; 8057 8058 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8059 8060 if (!bundle) { 8061 dm_error("Failed to allocate update bundle\n"); 8062 goto cleanup; 8063 } 8064 8065 /* 8066 * Disable the cursor first if we're disabling all the planes. 8067 * It'll remain on the screen after the planes are re-enabled 8068 * if we don't. 8069 */ 8070 if (acrtc_state->active_planes == 0) 8071 amdgpu_dm_commit_cursors(state); 8072 8073 /* update planes when needed */ 8074 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 8075 struct drm_crtc *crtc = new_plane_state->crtc; 8076 struct drm_crtc_state *new_crtc_state; 8077 struct drm_framebuffer *fb = new_plane_state->fb; 8078 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 8079 bool plane_needs_flip; 8080 struct dc_plane_state *dc_plane; 8081 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 8082 8083 /* Cursor plane is handled after stream updates */ 8084 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 8085 if ((fb && crtc == pcrtc) || 8086 (old_plane_state->fb && old_plane_state->crtc == pcrtc)) 8087 cursor_update = true; 8088 8089 continue; 8090 } 8091 8092 if (!fb || !crtc || pcrtc != crtc) 8093 continue; 8094 8095 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 8096 if (!new_crtc_state->active) 8097 continue; 8098 8099 dc_plane = dm_new_plane_state->dc_state; 8100 if (!dc_plane) 8101 continue; 8102 8103 bundle->surface_updates[planes_count].surface = dc_plane; 8104 if (new_pcrtc_state->color_mgmt_changed) { 8105 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 8106 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 8107 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 8108 } 8109 8110 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, 8111 &bundle->scaling_infos[planes_count]); 8112 8113 bundle->surface_updates[planes_count].scaling_info = 8114 &bundle->scaling_infos[planes_count]; 8115 8116 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 8117 8118 pflip_present = pflip_present || plane_needs_flip; 8119 8120 if (!plane_needs_flip) { 8121 
planes_count += 1; 8122 continue; 8123 } 8124 8125 fill_dc_plane_info_and_addr( 8126 dm->adev, new_plane_state, 8127 afb->tiling_flags, 8128 &bundle->plane_infos[planes_count], 8129 &bundle->flip_addrs[planes_count].address, 8130 afb->tmz_surface, false); 8131 8132 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 8133 new_plane_state->plane->index, 8134 bundle->plane_infos[planes_count].dcc.enable); 8135 8136 bundle->surface_updates[planes_count].plane_info = 8137 &bundle->plane_infos[planes_count]; 8138 8139 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled || 8140 acrtc_state->stream->link->replay_settings.replay_feature_enabled) { 8141 fill_dc_dirty_rects(plane, old_plane_state, 8142 new_plane_state, new_crtc_state, 8143 &bundle->flip_addrs[planes_count], 8144 &dirty_rects_changed); 8145 8146 /* 8147 * If the dirty regions changed, PSR-SU needs to be disabled temporarily 8148 * and re-enabled once the dirty regions are stable, to avoid a video glitch. 8149 * PSR-SU is re-enabled in vblank_control_worker() if the user pauses the 8150 * video while it is disabled. 8151 */ 8152 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 8153 acrtc_attach->dm_irq_params.allow_psr_entry && 8154 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8155 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8156 #endif 8157 dirty_rects_changed) { 8158 mutex_lock(&dm->dc_lock); 8159 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns = 8160 timestamp_ns; 8161 if (acrtc_state->stream->link->psr_settings.psr_allow_active) 8162 amdgpu_dm_psr_disable(acrtc_state->stream); 8163 mutex_unlock(&dm->dc_lock); 8164 } 8165 } 8166 8167 /* 8168 * Only allow immediate flips for fast updates that don't 8169 * change memory domain, FB pitch, DCC state, rotation or 8170 * mirroring. 8171 * 8172 * dm_crtc_helper_atomic_check() only accepts async flips with 8173 * fast updates.
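* (UPDATE_TYPE_FAST roughly means the update can be applied without reprogramming clocks or bandwidth; larger updates are not safe to flip immediately.)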
8174 */ 8175 if (crtc->state->async_flip && 8176 (acrtc_state->update_type != UPDATE_TYPE_FAST || 8177 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 8178 drm_warn_once(state->dev, 8179 "[PLANE:%d:%s] async flip with non-fast update\n", 8180 plane->base.id, plane->name); 8181 8182 bundle->flip_addrs[planes_count].flip_immediate = 8183 crtc->state->async_flip && 8184 acrtc_state->update_type == UPDATE_TYPE_FAST && 8185 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 8186 8187 timestamp_ns = ktime_get_ns(); 8188 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 8189 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 8190 bundle->surface_updates[planes_count].surface = dc_plane; 8191 8192 if (!bundle->surface_updates[planes_count].surface) { 8193 DRM_ERROR("No surface for CRTC: id=%d\n", 8194 acrtc_attach->crtc_id); 8195 continue; 8196 } 8197 8198 if (plane == pcrtc->primary) 8199 update_freesync_state_on_stream( 8200 dm, 8201 acrtc_state, 8202 acrtc_state->stream, 8203 dc_plane, 8204 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 8205 8206 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 8207 __func__, 8208 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 8209 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 8210 8211 planes_count += 1; 8212 8213 } 8214 8215 if (pflip_present) { 8216 if (!vrr_active) { 8217 /* Use old throttling in non-vrr fixed refresh rate mode 8218 * to keep flip scheduling based on target vblank counts 8219 * working in a backwards compatible way, e.g., for 8220 * clients using the GLX_OML_sync_control extension or 8221 * DRI3/Present extension with defined target_msc. 8222 */ 8223 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 8224 } else { 8225 /* For variable refresh rate mode only: 8226 * Get vblank of last completed flip to avoid > 1 vrr 8227 * flips per video frame by use of throttling, but allow 8228 * flip programming anywhere in the possibly large 8229 * variable vrr vblank interval for fine-grained flip 8230 * timing control and more opportunity to avoid stutter 8231 * on late submission of flips. 8232 */ 8233 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8234 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 8235 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8236 } 8237 8238 target_vblank = last_flip_vblank + wait_for_vblank; 8239 8240 /* 8241 * Wait until we're out of the vertical blank period before the one 8242 * targeted by the flip 8243 */ 8244 while ((acrtc_attach->enabled && 8245 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 8246 0, &vpos, &hpos, NULL, 8247 NULL, &pcrtc->hwmode) 8248 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 8249 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 8250 (int)(target_vblank - 8251 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 8252 usleep_range(1000, 1100); 8253 } 8254 8255 /** 8256 * Prepare the flip event for the pageflip interrupt to handle. 8257 * 8258 * This only works in the case where we've already turned on the 8259 * appropriate hardware blocks (eg. HUBP) so in the transition case 8260 * from 0 -> n planes we have to skip a hardware generated event 8261 * and rely on sending it from software. 
8262 */ 8263 if (acrtc_attach->base.state->event && 8264 acrtc_state->active_planes > 0) { 8265 drm_crtc_vblank_get(pcrtc); 8266 8267 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8268 8269 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 8270 prepare_flip_isr(acrtc_attach); 8271 8272 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8273 } 8274 8275 if (acrtc_state->stream) { 8276 if (acrtc_state->freesync_vrr_info_changed) 8277 bundle->stream_update.vrr_infopacket = 8278 &acrtc_state->stream->vrr_infopacket; 8279 } 8280 } else if (cursor_update && acrtc_state->active_planes > 0 && 8281 acrtc_attach->base.state->event) { 8282 drm_crtc_vblank_get(pcrtc); 8283 8284 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8285 8286 acrtc_attach->event = acrtc_attach->base.state->event; 8287 acrtc_attach->base.state->event = NULL; 8288 8289 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8290 } 8291 8292 /* Update the planes if changed or disable if we don't have any. */ 8293 if ((planes_count || acrtc_state->active_planes == 0) && 8294 acrtc_state->stream) { 8295 /* 8296 * If PSR or idle optimizations are enabled then flush out 8297 * any pending work before hardware programming. 8298 */ 8299 if (dm->vblank_control_workqueue) 8300 flush_workqueue(dm->vblank_control_workqueue); 8301 8302 bundle->stream_update.stream = acrtc_state->stream; 8303 if (new_pcrtc_state->mode_changed) { 8304 bundle->stream_update.src = acrtc_state->stream->src; 8305 bundle->stream_update.dst = acrtc_state->stream->dst; 8306 } 8307 8308 if (new_pcrtc_state->color_mgmt_changed) { 8309 /* 8310 * TODO: This isn't fully correct since we've actually 8311 * already modified the stream in place. 8312 */ 8313 bundle->stream_update.gamut_remap = 8314 &acrtc_state->stream->gamut_remap_matrix; 8315 bundle->stream_update.output_csc_transform = 8316 &acrtc_state->stream->csc_color_matrix; 8317 bundle->stream_update.out_transfer_func = 8318 acrtc_state->stream->out_transfer_func; 8319 } 8320 8321 acrtc_state->stream->abm_level = acrtc_state->abm_level; 8322 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 8323 bundle->stream_update.abm_level = &acrtc_state->abm_level; 8324 8325 mutex_lock(&dm->dc_lock); 8326 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8327 acrtc_state->stream->link->psr_settings.psr_allow_active) 8328 amdgpu_dm_psr_disable(acrtc_state->stream); 8329 mutex_unlock(&dm->dc_lock); 8330 8331 /* 8332 * If FreeSync state on the stream has changed then we need to 8333 * re-adjust the min/max bounds now that DC doesn't handle this 8334 * as part of commit. 8335 */ 8336 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 8337 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8338 dc_stream_adjust_vmin_vmax( 8339 dm->dc, acrtc_state->stream, 8340 &acrtc_attach->dm_irq_params.vrr_params.adjust); 8341 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8342 } 8343 mutex_lock(&dm->dc_lock); 8344 update_planes_and_stream_adapter(dm->dc, 8345 acrtc_state->update_type, 8346 planes_count, 8347 acrtc_state->stream, 8348 &bundle->stream_update, 8349 bundle->surface_updates); 8350 8351 /** 8352 * Enable or disable the interrupts on the backend. 8353 * 8354 * Most pipes are put into power gating when unused. 8355 * 8356 * When power gating is enabled on a pipe we lose the 8357 * interrupt enablement state when power gating is disabled. 
8358 * 8359 * So we need to update the IRQ control state in hardware 8360 * whenever the pipe turns on (since it could be previously 8361 * power gated) or off (since some pipes can't be power gated 8362 * on some ASICs). 8363 */ 8364 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 8365 dm_update_pflip_irq_state(drm_to_adev(dev), 8366 acrtc_attach); 8367 8368 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 8369 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 8370 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 8371 amdgpu_dm_link_setup_psr(acrtc_state->stream); 8372 8373 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 8374 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 8375 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 8376 struct amdgpu_dm_connector *aconn = 8377 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 8378 8379 if (aconn->psr_skip_count > 0) 8380 aconn->psr_skip_count--; 8381 8382 /* Allow PSR when skip count is 0. */ 8383 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 8384 8385 /* 8386 * If sink supports PSR SU, there is no need to rely on 8387 * a vblank event disable request to enable PSR. PSR SU 8388 * can be enabled immediately once OS demonstrates an 8389 * adequate number of fast atomic commits to notify KMD 8390 * of update events. See `vblank_control_worker()`. 8391 */ 8392 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 8393 acrtc_attach->dm_irq_params.allow_psr_entry && 8394 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 8395 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && 8396 #endif 8397 !acrtc_state->stream->link->psr_settings.psr_allow_active && 8398 (timestamp_ns - 8399 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > 8400 500000000) 8401 amdgpu_dm_psr_enable(acrtc_state->stream); 8402 } else { 8403 acrtc_attach->dm_irq_params.allow_psr_entry = false; 8404 } 8405 8406 mutex_unlock(&dm->dc_lock); 8407 } 8408 8409 /* 8410 * Update cursor state *after* programming all the planes. 8411 * This avoids redundant programming in the case where we're going 8412 * to be disabling a single plane - those pipes are being disabled. 8413 */ 8414 if (acrtc_state->active_planes) 8415 amdgpu_dm_commit_cursors(state); 8416 8417 cleanup: 8418 kfree(bundle); 8419 } 8420 8421 static void amdgpu_dm_commit_audio(struct drm_device *dev, 8422 struct drm_atomic_state *state) 8423 { 8424 struct amdgpu_device *adev = drm_to_adev(dev); 8425 struct amdgpu_dm_connector *aconnector; 8426 struct drm_connector *connector; 8427 struct drm_connector_state *old_con_state, *new_con_state; 8428 struct drm_crtc_state *new_crtc_state; 8429 struct dm_crtc_state *new_dm_crtc_state; 8430 const struct dc_stream_status *status; 8431 int i, inst; 8432 8433 /* Notify device removals. */ 8434 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8435 if (old_con_state->crtc != new_con_state->crtc) { 8436 /* CRTC changes require notification. 
*/ 8437 goto notify; 8438 } 8439 8440 if (!new_con_state->crtc) 8441 continue; 8442 8443 new_crtc_state = drm_atomic_get_new_crtc_state( 8444 state, new_con_state->crtc); 8445 8446 if (!new_crtc_state) 8447 continue; 8448 8449 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8450 continue; 8451 8452 notify: 8453 aconnector = to_amdgpu_dm_connector(connector); 8454 8455 mutex_lock(&adev->dm.audio_lock); 8456 inst = aconnector->audio_inst; 8457 aconnector->audio_inst = -1; 8458 mutex_unlock(&adev->dm.audio_lock); 8459 8460 amdgpu_dm_audio_eld_notify(adev, inst); 8461 } 8462 8463 /* Notify audio device additions. */ 8464 for_each_new_connector_in_state(state, connector, new_con_state, i) { 8465 if (!new_con_state->crtc) 8466 continue; 8467 8468 new_crtc_state = drm_atomic_get_new_crtc_state( 8469 state, new_con_state->crtc); 8470 8471 if (!new_crtc_state) 8472 continue; 8473 8474 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 8475 continue; 8476 8477 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 8478 if (!new_dm_crtc_state->stream) 8479 continue; 8480 8481 status = dc_stream_get_status(new_dm_crtc_state->stream); 8482 if (!status) 8483 continue; 8484 8485 aconnector = to_amdgpu_dm_connector(connector); 8486 8487 mutex_lock(&adev->dm.audio_lock); 8488 inst = status->audio_inst; 8489 aconnector->audio_inst = inst; 8490 mutex_unlock(&adev->dm.audio_lock); 8491 8492 amdgpu_dm_audio_eld_notify(adev, inst); 8493 } 8494 } 8495 8496 /* 8497 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 8498 * @crtc_state: the DRM CRTC state 8499 * @stream_state: the DC stream state. 8500 * 8501 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 8502 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 
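* (At present only mode_changed is mirrored, derived from drm_atomic_crtc_needs_modeset().)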
8503 */ 8504 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 8505 struct dc_stream_state *stream_state) 8506 { 8507 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 8508 } 8509 8510 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, 8511 struct dc_state *dc_state) 8512 { 8513 struct drm_device *dev = state->dev; 8514 struct amdgpu_device *adev = drm_to_adev(dev); 8515 struct amdgpu_display_manager *dm = &adev->dm; 8516 struct drm_crtc *crtc; 8517 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8518 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 8519 bool mode_set_reset_required = false; 8520 u32 i; 8521 8522 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 8523 new_crtc_state, i) { 8524 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8525 8526 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8527 8528 if (old_crtc_state->active && 8529 (!new_crtc_state->active || 8530 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 8531 manage_dm_interrupts(adev, acrtc, false); 8532 dc_stream_release(dm_old_crtc_state->stream); 8533 } 8534 } 8535 8536 drm_atomic_helper_calc_timestamping_constants(state); 8537 8538 /* update changed items */ 8539 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8540 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8541 8542 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8543 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8544 8545 drm_dbg_state(state->dev, 8546 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 8547 acrtc->crtc_id, 8548 new_crtc_state->enable, 8549 new_crtc_state->active, 8550 new_crtc_state->planes_changed, 8551 new_crtc_state->mode_changed, 8552 new_crtc_state->active_changed, 8553 new_crtc_state->connectors_changed); 8554 8555 /* Disable cursor if disabling crtc */ 8556 if (old_crtc_state->active && !new_crtc_state->active) { 8557 struct dc_cursor_position position; 8558 8559 memset(&position, 0, sizeof(position)); 8560 mutex_lock(&dm->dc_lock); 8561 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); 8562 mutex_unlock(&dm->dc_lock); 8563 } 8564 8565 /* Copy all transient state flags into dc state */ 8566 if (dm_new_crtc_state->stream) { 8567 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 8568 dm_new_crtc_state->stream); 8569 } 8570 8571 /* handles headless hotplug case, updating new_state and 8572 * aconnector as needed 8573 */ 8574 8575 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 8576 8577 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 8578 8579 if (!dm_new_crtc_state->stream) { 8580 /* 8581 * this could happen because of issues with 8582 * userspace notifications delivery. 8583 * In this case userspace tries to set mode on 8584 * display which is disconnected in fact. 8585 * dc_sink is NULL in this case on aconnector. 8586 * We expect reset mode will come soon. 
8587 * 8588 * This can also happen when unplug is done 8589 * during resume sequence ended 8590 * 8591 * In this case, we want to pretend we still 8592 * have a sink to keep the pipe running so that 8593 * hw state is consistent with the sw state 8594 */ 8595 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 8596 __func__, acrtc->base.base.id); 8597 continue; 8598 } 8599 8600 if (dm_old_crtc_state->stream) 8601 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 8602 8603 pm_runtime_get_noresume(dev->dev); 8604 8605 acrtc->enabled = true; 8606 acrtc->hw_mode = new_crtc_state->mode; 8607 crtc->hwmode = new_crtc_state->mode; 8608 mode_set_reset_required = true; 8609 } else if (modereset_required(new_crtc_state)) { 8610 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); 8611 /* i.e. reset mode */ 8612 if (dm_old_crtc_state->stream) 8613 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 8614 8615 mode_set_reset_required = true; 8616 } 8617 } /* for_each_crtc_in_state() */ 8618 8619 /* if there mode set or reset, disable eDP PSR */ 8620 if (mode_set_reset_required) { 8621 if (dm->vblank_control_workqueue) 8622 flush_workqueue(dm->vblank_control_workqueue); 8623 8624 amdgpu_dm_psr_disable_all(dm); 8625 } 8626 8627 dm_enable_per_frame_crtc_master_sync(dc_state); 8628 mutex_lock(&dm->dc_lock); 8629 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); 8630 8631 /* Allow idle optimization when vblank count is 0 for display off */ 8632 if (dm->active_vblank_irq_count == 0) 8633 dc_allow_idle_optimizations(dm->dc, true); 8634 mutex_unlock(&dm->dc_lock); 8635 8636 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 8637 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8638 8639 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8640 8641 if (dm_new_crtc_state->stream != NULL) { 8642 const struct dc_stream_status *status = 8643 dc_stream_get_status(dm_new_crtc_state->stream); 8644 8645 if (!status) 8646 status = dc_stream_get_status_from_state(dc_state, 8647 dm_new_crtc_state->stream); 8648 if (!status) 8649 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 8650 else 8651 acrtc->otg_inst = status->primary_otg_inst; 8652 } 8653 } 8654 } 8655 8656 /** 8657 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 8658 * @state: The atomic state to commit 8659 * 8660 * This will tell DC to commit the constructed DC state from atomic_check, 8661 * programming the hardware. Any failures here implies a hardware failure, since 8662 * atomic check should have filtered anything non-kosher. 
8663 */ 8664 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 8665 { 8666 struct drm_device *dev = state->dev; 8667 struct amdgpu_device *adev = drm_to_adev(dev); 8668 struct amdgpu_display_manager *dm = &adev->dm; 8669 struct dm_atomic_state *dm_state; 8670 struct dc_state *dc_state = NULL; 8671 u32 i, j; 8672 struct drm_crtc *crtc; 8673 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 8674 unsigned long flags; 8675 bool wait_for_vblank = true; 8676 struct drm_connector *connector; 8677 struct drm_connector_state *old_con_state, *new_con_state; 8678 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 8679 int crtc_disable_count = 0; 8680 8681 trace_amdgpu_dm_atomic_commit_tail_begin(state); 8682 8683 drm_atomic_helper_update_legacy_modeset_state(dev, state); 8684 drm_dp_mst_atomic_wait_for_dependencies(state); 8685 8686 dm_state = dm_atomic_get_new_state(state); 8687 if (dm_state && dm_state->context) { 8688 dc_state = dm_state->context; 8689 amdgpu_dm_commit_streams(state, dc_state); 8690 } 8691 8692 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8693 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8694 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8695 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8696 8697 if (!adev->dm.hdcp_workqueue) 8698 continue; 8699 8700 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); 8701 8702 if (!connector) 8703 continue; 8704 8705 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", 8706 connector->index, connector->status, connector->dpms); 8707 pr_debug("[HDCP_DM] state protection old: %x new: %x\n", 8708 old_con_state->content_protection, new_con_state->content_protection); 8709 8710 if (aconnector->dc_sink) { 8711 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && 8712 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { 8713 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", 8714 aconnector->dc_sink->edid_caps.display_name); 8715 } 8716 } 8717 8718 new_crtc_state = NULL; 8719 old_crtc_state = NULL; 8720 8721 if (acrtc) { 8722 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8723 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8724 } 8725 8726 if (old_crtc_state) 8727 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8728 old_crtc_state->enable, 8729 old_crtc_state->active, 8730 old_crtc_state->mode_changed, 8731 old_crtc_state->active_changed, 8732 old_crtc_state->connectors_changed); 8733 8734 if (new_crtc_state) 8735 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", 8736 new_crtc_state->enable, 8737 new_crtc_state->active, 8738 new_crtc_state->mode_changed, 8739 new_crtc_state->active_changed, 8740 new_crtc_state->connectors_changed); 8741 } 8742 8743 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8744 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8745 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8746 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8747 8748 if (!adev->dm.hdcp_workqueue) 8749 continue; 8750 8751 new_crtc_state = NULL; 8752 old_crtc_state = NULL; 8753 8754 if (acrtc) { 8755 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8756 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8757 } 8758 
8759 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8760 8761 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 8762 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 8763 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 8764 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8765 dm_new_con_state->update_hdcp = true; 8766 continue; 8767 } 8768 8769 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, 8770 old_con_state, connector, adev->dm.hdcp_workqueue)) { 8771 /* When a display is unplugged from an MST hub, the connector will 8772 * be destroyed within dm_dp_mst_connector_destroy, and its 8773 * HDCP properties, like type, undesired, desired and enabled, 8774 * will be lost. So, save the HDCP properties into hdcp_work within 8775 * amdgpu_dm_atomic_commit_tail. If the same display is 8776 * plugged back with the same display index, its HDCP properties 8777 * will be retrieved from hdcp_work within dm_dp_mst_get_modes. 8778 */ 8779 8780 bool enable_encryption = false; 8781 8782 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) 8783 enable_encryption = true; 8784 8785 if (aconnector->dc_link && aconnector->dc_sink && 8786 aconnector->dc_link->type == dc_connection_mst_branch) { 8787 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; 8788 struct hdcp_workqueue *hdcp_w = 8789 &hdcp_work[aconnector->dc_link->link_index]; 8790 8791 hdcp_w->hdcp_content_type[connector->index] = 8792 new_con_state->hdcp_content_type; 8793 hdcp_w->content_protection[connector->index] = 8794 new_con_state->content_protection; 8795 } 8796 8797 if (new_crtc_state && new_crtc_state->mode_changed && 8798 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) 8799 enable_encryption = true; 8800 8801 DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); 8802 8803 hdcp_update_display( 8804 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 8805 new_con_state->hdcp_content_type, enable_encryption); 8806 } 8807 } 8808 8809 /* Handle connector state changes */ 8810 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 8811 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 8812 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 8813 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 8814 struct dc_surface_update *dummy_updates; 8815 struct dc_stream_update stream_update; 8816 struct dc_info_packet hdr_packet; 8817 struct dc_stream_status *status = NULL; 8818 bool abm_changed, hdr_changed, scaling_changed; 8819 8820 memset(&stream_update, 0, sizeof(stream_update)); 8821 8822 if (acrtc) { 8823 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 8824 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 8825 } 8826 8827 /* Skip any modesets/resets */ 8828 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 8829 continue; 8830 8831 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8832 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8833 8834 scaling_changed = is_scaling_state_different(dm_new_con_state, 8835 dm_old_con_state); 8836 8837 abm_changed = dm_new_crtc_state->abm_level != 8838 dm_old_crtc_state->abm_level; 8839 8840 hdr_changed = 8841 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 8842 8843 if
(!scaling_changed && !abm_changed && !hdr_changed) 8844 continue; 8845 8846 stream_update.stream = dm_new_crtc_state->stream; 8847 if (scaling_changed) { 8848 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 8849 dm_new_con_state, dm_new_crtc_state->stream); 8850 8851 stream_update.src = dm_new_crtc_state->stream->src; 8852 stream_update.dst = dm_new_crtc_state->stream->dst; 8853 } 8854 8855 if (abm_changed) { 8856 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 8857 8858 stream_update.abm_level = &dm_new_crtc_state->abm_level; 8859 } 8860 8861 if (hdr_changed) { 8862 fill_hdr_info_packet(new_con_state, &hdr_packet); 8863 stream_update.hdr_static_metadata = &hdr_packet; 8864 } 8865 8866 status = dc_stream_get_status(dm_new_crtc_state->stream); 8867 8868 if (WARN_ON(!status)) 8869 continue; 8870 8871 WARN_ON(!status->plane_count); 8872 8873 /* 8874 * TODO: DC refuses to perform stream updates without a dc_surface_update. 8875 * Here we create an empty update on each plane. 8876 * To fix this, DC should permit updating only stream properties. 8877 */ 8878 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); 8879 for (j = 0; j < status->plane_count; j++) 8880 dummy_updates[j].surface = status->plane_states[0]; 8881 8882 8883 mutex_lock(&dm->dc_lock); 8884 dc_update_planes_and_stream(dm->dc, 8885 dummy_updates, 8886 status->plane_count, 8887 dm_new_crtc_state->stream, 8888 &stream_update); 8889 mutex_unlock(&dm->dc_lock); 8890 kfree(dummy_updates); 8891 } 8892 8893 /** 8894 * Enable interrupts for CRTCs that are newly enabled or went through 8895 * a modeset. It was intentionally deferred until after the front end 8896 * state was modified to wait until the OTG was on and so the IRQ 8897 * handlers didn't access stale or invalid state. 8898 */ 8899 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8900 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8901 #ifdef CONFIG_DEBUG_FS 8902 enum amdgpu_dm_pipe_crc_source cur_crc_src; 8903 #endif 8904 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 8905 if (old_crtc_state->active && !new_crtc_state->active) 8906 crtc_disable_count++; 8907 8908 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8909 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8910 8911 /* For freesync config update on crtc state and params for irq */ 8912 update_stream_irq_parameters(dm, dm_new_crtc_state); 8913 8914 #ifdef CONFIG_DEBUG_FS 8915 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8916 cur_crc_src = acrtc->dm_irq_params.crc_src; 8917 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8918 #endif 8919 8920 if (new_crtc_state->active && 8921 (!old_crtc_state->active || 8922 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 8923 dc_stream_retain(dm_new_crtc_state->stream); 8924 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 8925 manage_dm_interrupts(adev, acrtc, true); 8926 } 8927 /* Handle vrr on->off / off->on transitions */ 8928 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); 8929 8930 #ifdef CONFIG_DEBUG_FS 8931 if (new_crtc_state->active && 8932 (!old_crtc_state->active || 8933 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 8934 /** 8935 * Frontend may have changed so reapply the CRC capture 8936 * settings for the stream. 
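* (The CRC source configuration presumably does not survive the OTG being re-programmed, so it is reapplied whenever the CRTC went through a modeset.)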
8937 */
8938 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8939 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8940 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8941 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8942 acrtc->dm_irq_params.window_param.update_win = true;
8943
8944 /*
8945 * It takes 2 frames for HW to stably generate CRC when
8946 * resuming from suspend, so we set skip_frame_cnt to 2.
8947 */
8948 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
8949 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8950 }
8951 #endif
8952 if (amdgpu_dm_crtc_configure_crc_source(
8953 crtc, dm_new_crtc_state, cur_crc_src))
8954 DRM_DEBUG_DRIVER("Failed to configure crc source\n");
8955 }
8956 }
8957 #endif
8958 }
8959
8960 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8961 if (new_crtc_state->async_flip)
8962 wait_for_vblank = false;
8963
8964 /* Update planes when needed, per CRTC */
8965 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8966 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8967
8968 if (dm_new_crtc_state->stream)
8969 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
8970 }
8971
8972 /* Update audio instances for each connector. */
8973 amdgpu_dm_commit_audio(dev, state);
8974
8975 /* Restore the backlight level */
8976 for (i = 0; i < dm->num_of_edps; i++) {
8977 if (dm->backlight_dev[i] &&
8978 (dm->actual_brightness[i] != dm->brightness[i]))
8979 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8980 }
8981
8982 /*
8983 * Send a vblank event for every event not handled during the flip, and
8984 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
8985 */
8986 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8987 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8988
8989 if (new_crtc_state->event)
8990 drm_send_event_locked(dev, &new_crtc_state->event->base);
8991
8992 new_crtc_state->event = NULL;
8993 }
8994 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8995
8996 /* Signal HW programming completion */
8997 drm_atomic_helper_commit_hw_done(state);
8998
8999 if (wait_for_vblank)
9000 drm_atomic_helper_wait_for_flip_done(dev, state);
9001
9002 drm_atomic_helper_cleanup_planes(dev, state);
9003
9004 /* Don't free the memory if we are hitting this as part of suspend.
9005 * This way we don't free any memory during suspend; see
9006 * amdgpu_bo_free_kernel(). The memory will be freed in the first
9007 * non-suspend modeset or when the driver is torn down.
9008 */
9009 if (!adev->in_suspend) {
9010 /* Return the stolen VGA memory back to VRAM */
9011 if (!adev->mman.keep_stolen_vga_memory)
9012 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9013 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9014 }
9015
9016 /*
9017 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9018 * so we can put the GPU into runtime suspend if we're not driving any
9019 * displays anymore.
9020 */
9021 for (i = 0; i < crtc_disable_count; i++)
9022 pm_runtime_put_autosuspend(dev->dev);
9023 pm_runtime_mark_last_busy(dev->dev);
9024 }
9025
9026 static int dm_force_atomic_commit(struct drm_connector *connector)
9027 {
9028 int ret = 0;
9029 struct drm_device *ddev = connector->dev;
9030 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9031 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9032 struct drm_plane *plane = disconnected_acrtc->base.primary;
9033 struct drm_connector_state *conn_state;
9034 struct drm_crtc_state *crtc_state;
9035 struct drm_plane_state *plane_state;
9036
9037 if (!state)
9038 return -ENOMEM;
9039
9040 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9041
9042 /* Construct an atomic state to restore the previous display setting */
9043
9044 /*
9045 * Attach connectors to drm_atomic_state
9046 */
9047 conn_state = drm_atomic_get_connector_state(state, connector);
9048
9049 ret = PTR_ERR_OR_ZERO(conn_state);
9050 if (ret)
9051 goto out;
9052
9053 /* Attach CRTC to drm_atomic_state */
9054 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9055
9056 ret = PTR_ERR_OR_ZERO(crtc_state);
9057 if (ret)
9058 goto out;
9059
9060 /* Force a restore */
9061 crtc_state->mode_changed = true;
9062
9063 /* Attach plane to drm_atomic_state */
9064 plane_state = drm_atomic_get_plane_state(state, plane);
9065
9066 ret = PTR_ERR_OR_ZERO(plane_state);
9067 if (ret)
9068 goto out;
9069
9070 /* Call commit internally with the state we just constructed */
9071 ret = drm_atomic_commit(state);
9072
9073 out:
9074 drm_atomic_state_put(state);
9075 if (ret)
9076 DRM_ERROR("Restoring old state failed with %i\n", ret);
9077
9078 return ret;
9079 }
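/*
 * Editorial note (not part of the original source): a usage sketch for
 * dm_force_atomic_commit() above. Its only caller is
 * dm_restore_drm_connector_state() below: when a display reappears
 * without userspace issuing a modeset, DM builds a minimal atomic state
 * holding just the connector, its CRTC and the primary plane, forces
 * mode_changed so the full programming path runs, and commits it to
 * restore the previous timing.
 */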
9080
9081 /*
9082 * This function handles all cases when a set mode does not come upon hotplug.
9083 * This includes when a display is unplugged and then plugged back into the
9084 * same port, and when running without usermode desktop manager support.
9085 */
9086 void dm_restore_drm_connector_state(struct drm_device *dev,
9087 struct drm_connector *connector)
9088 {
9089 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9090 struct amdgpu_crtc *disconnected_acrtc;
9091 struct dm_crtc_state *acrtc_state;
9092
9093 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9094 return;
9095
9096 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9097 if (!disconnected_acrtc)
9098 return;
9099
9100 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9101 if (!acrtc_state->stream)
9102 return;
9103
9104 /*
9105 * If the previous sink is not released and is different from the current,
9106 * we deduce we are in a state where we cannot rely on a usermode call
9107 * to turn on the display, so we do it here.
9108 */
9109 if (acrtc_state->stream->sink != aconnector->dc_sink)
9110 dm_force_atomic_commit(&aconnector->base);
9111 }
9112
9113 /*
9114 * Grabs all modeset locks to serialize against any blocking commits, and
9115 * waits for the completion of all non-blocking commits.
9116 */
9117 static int do_aquire_global_lock(struct drm_device *dev,
9118 struct drm_atomic_state *state)
9119 {
9120 struct drm_crtc *crtc;
9121 struct drm_crtc_commit *commit;
9122 long ret;
9123
9124 /*
9125 * Adding all modeset locks to acquire_ctx ensures that
9126 * when the framework releases it, the
9127 * extra locks we are taking here will get released too.
9128 */
9129 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9130 if (ret)
9131 return ret;
9132
9133 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9134 spin_lock(&crtc->commit_lock);
9135 commit = list_first_entry_or_null(&crtc->commit_list,
9136 struct drm_crtc_commit, commit_entry);
9137 if (commit)
9138 drm_crtc_commit_get(commit);
9139 spin_unlock(&crtc->commit_lock);
9140
9141 if (!commit)
9142 continue;
9143
9144 /*
9145 * Make sure all pending HW programming is completed and
9146 * page flips are done.
9147 */
9148 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9149
9150 if (ret > 0)
9151 ret = wait_for_completion_interruptible_timeout(
9152 &commit->flip_done, 10*HZ);
9153
9154 if (ret == 0)
9155 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9156 crtc->base.id, crtc->name);
9157
9158 drm_crtc_commit_put(commit);
9159 }
9160
9161 return ret < 0 ?
ret : 0;
9162 }
9163
9164 static void get_freesync_config_for_crtc(
9165 struct dm_crtc_state *new_crtc_state,
9166 struct dm_connector_state *new_con_state)
9167 {
9168 struct mod_freesync_config config = {0};
9169 struct amdgpu_dm_connector *aconnector =
9170 to_amdgpu_dm_connector(new_con_state->base.connector);
9171 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9172 int vrefresh = drm_mode_vrefresh(mode);
9173 bool fs_vid_mode = false;
9174
9175 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9176 vrefresh >= aconnector->min_vfreq &&
9177 vrefresh <= aconnector->max_vfreq;
9178
9179 if (new_crtc_state->vrr_supported) {
9180 new_crtc_state->stream->ignore_msa_timing_param = true;
9181 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9182
9183 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9184 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9185 config.vsif_supported = true;
9186 config.btr = true;
9187
9188 if (fs_vid_mode) {
9189 config.state = VRR_STATE_ACTIVE_FIXED;
9190 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9191 goto out;
9192 } else if (new_crtc_state->base.vrr_enabled) {
9193 config.state = VRR_STATE_ACTIVE_VARIABLE;
9194 } else {
9195 config.state = VRR_STATE_INACTIVE;
9196 }
9197 }
9198 out:
9199 new_crtc_state->freesync_config = config;
9200 }
9201
9202 static void reset_freesync_config_for_crtc(
9203 struct dm_crtc_state *new_crtc_state)
9204 {
9205 new_crtc_state->vrr_supported = false;
9206
9207 memset(&new_crtc_state->vrr_infopacket, 0,
9208 sizeof(new_crtc_state->vrr_infopacket));
9209 }
9210
9211 static bool
9212 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9213 struct drm_crtc_state *new_crtc_state)
9214 {
9215 const struct drm_display_mode *old_mode, *new_mode;
9216
9217 if (!old_crtc_state || !new_crtc_state)
9218 return false;
9219
9220 old_mode = &old_crtc_state->mode;
9221 new_mode = &new_crtc_state->mode;
9222
/*
 * For a freesync video mode switch, only the vertical blanking may
 * change: the pixel clock and all horizontal timings must match, while
 * vtotal and the vsync position are expected to differ, with the vsync
 * width preserved. Hence the mix of == and != checks below.
 */
9223 if (old_mode->clock == new_mode->clock &&
9224 old_mode->hdisplay == new_mode->hdisplay &&
9225 old_mode->vdisplay == new_mode->vdisplay &&
9226 old_mode->htotal == new_mode->htotal &&
9227 old_mode->vtotal != new_mode->vtotal &&
9228 old_mode->hsync_start == new_mode->hsync_start &&
9229 old_mode->vsync_start != new_mode->vsync_start &&
9230 old_mode->hsync_end == new_mode->hsync_end &&
9231 old_mode->vsync_end != new_mode->vsync_end &&
9232 old_mode->hskew == new_mode->hskew &&
9233 old_mode->vscan == new_mode->vscan &&
9234 (old_mode->vsync_end - old_mode->vsync_start) ==
9235 (new_mode->vsync_end - new_mode->vsync_start))
9236 return true;
9237
9238 return false;
9239 }
9240
9241 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9242 {
9243 u64 num, den, res;
9244 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9245
9246 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9247
9248 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9249 den = (unsigned long long)new_crtc_state->mode.htotal *
9250 (unsigned long long)new_crtc_state->mode.vtotal;
9251
9252 res = div_u64(num, den);
9253 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9254 }
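/*
 * Editorial note (illustrative values, not from the original source):
 * a worked example of the fixed-refresh computation above. For a
 * 1920x1080 mode with clock = 148500 kHz, htotal = 2200 and
 * vtotal = 1125:
 *
 *   num = 148500 * 1000 * 1000000
 *   den = 2200 * 1125 = 2475000
 *   div_u64(num, den) = 60000000 uHz, i.e. a 60 Hz fixed refresh rate.
 */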
9255
9256 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9257 struct drm_atomic_state *state,
9258 struct drm_crtc *crtc,
9259 struct drm_crtc_state *old_crtc_state,
9260 struct drm_crtc_state *new_crtc_state,
9261 bool enable,
9262 bool *lock_and_validation_needed)
9263 {
9264 struct dm_atomic_state *dm_state = NULL;
9265 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9266 struct dc_stream_state *new_stream;
9267 int ret = 0;
9268
9269 /*
9270 * TODO: Move this code into dm_crtc_atomic_check once we get rid of
9271 * dc_validation_set. Update changed items here.
9272 */
9273 struct amdgpu_crtc *acrtc = NULL;
9274 struct amdgpu_dm_connector *aconnector = NULL;
9275 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9276 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9277
9278 new_stream = NULL;
9279
9280 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9281 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9282 acrtc = to_amdgpu_crtc(crtc);
9283 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9284
9285 /* TODO: This hack should go away */
9286 if (aconnector && enable) {
9287 /* Make sure a fake sink is created in the plug-in scenario */
9288 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9289 &aconnector->base);
9290 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9291 &aconnector->base);
9292
9293 if (IS_ERR(drm_new_conn_state)) {
9294 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9295 goto fail;
9296 }
9297
9298 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9299 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9300
9301 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9302 goto skip_modeset;
9303
9304 new_stream = create_validate_stream_for_sink(aconnector,
9305 &new_crtc_state->mode,
9306 dm_new_conn_state,
9307 dm_old_crtc_state->stream);
9308
9309 /*
9310 * We can have no stream on ACTION_SET if a display
9311 * was disconnected during S3; in this case it is not an
9312 * error, the OS will be updated after detection and
9313 * will do the right thing on the next atomic commit.
9314 */
9315
9316 if (!new_stream) {
9317 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9318 __func__, acrtc->base.base.id);
9319 ret = -ENOMEM;
9320 goto fail;
9321 }
9322
9323 /*
9324 * TODO: Check VSDB bits to decide whether this should
9325 * be enabled or not.
9326 */
9327 new_stream->triggered_crtc_reset.enabled =
9328 dm->force_timing_sync;
9329
9330 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9331
9332 ret = fill_hdr_info_packet(drm_new_conn_state,
9333 &new_stream->hdr_static_metadata);
9334 if (ret)
9335 goto fail;
9336
9337 /*
9338 * If we already removed the old stream from the context
9339 * (and set the new stream to NULL) then we can't reuse
9340 * the old stream even if the stream and scaling are unchanged.
9341 * We'll hit the BUG_ON and a black screen.
9342 *
9343 * TODO: Refactor this function to allow this check to work
9344 * in all conditions.
9345 */ 9346 if (dm_new_crtc_state->stream && 9347 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 9348 goto skip_modeset; 9349 9350 if (dm_new_crtc_state->stream && 9351 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 9352 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 9353 new_crtc_state->mode_changed = false; 9354 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 9355 new_crtc_state->mode_changed); 9356 } 9357 } 9358 9359 /* mode_changed flag may get updated above, need to check again */ 9360 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9361 goto skip_modeset; 9362 9363 drm_dbg_state(state->dev, 9364 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", 9365 acrtc->crtc_id, 9366 new_crtc_state->enable, 9367 new_crtc_state->active, 9368 new_crtc_state->planes_changed, 9369 new_crtc_state->mode_changed, 9370 new_crtc_state->active_changed, 9371 new_crtc_state->connectors_changed); 9372 9373 /* Remove stream for any changed/disabled CRTC */ 9374 if (!enable) { 9375 9376 if (!dm_old_crtc_state->stream) 9377 goto skip_modeset; 9378 9379 /* Unset freesync video if it was active before */ 9380 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { 9381 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; 9382 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; 9383 } 9384 9385 /* Now check if we should set freesync video mode */ 9386 if (dm_new_crtc_state->stream && 9387 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 9388 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && 9389 is_timing_unchanged_for_freesync(new_crtc_state, 9390 old_crtc_state)) { 9391 new_crtc_state->mode_changed = false; 9392 DRM_DEBUG_DRIVER( 9393 "Mode change not required for front porch change, setting mode_changed to %d", 9394 new_crtc_state->mode_changed); 9395 9396 set_freesync_fixed_config(dm_new_crtc_state); 9397 9398 goto skip_modeset; 9399 } else if (aconnector && 9400 is_freesync_video_mode(&new_crtc_state->mode, 9401 aconnector)) { 9402 struct drm_display_mode *high_mode; 9403 9404 high_mode = get_highest_refresh_rate_mode(aconnector, false); 9405 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) 9406 set_freesync_fixed_config(dm_new_crtc_state); 9407 } 9408 9409 ret = dm_atomic_get_state(state, &dm_state); 9410 if (ret) 9411 goto fail; 9412 9413 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 9414 crtc->base.id); 9415 9416 /* i.e. 
reset mode */
9417 if (dc_remove_stream_from_ctx(
9418 dm->dc,
9419 dm_state->context,
9420 dm_old_crtc_state->stream) != DC_OK) {
9421 ret = -EINVAL;
9422 goto fail;
9423 }
9424
9425 dc_stream_release(dm_old_crtc_state->stream);
9426 dm_new_crtc_state->stream = NULL;
9427
9428 reset_freesync_config_for_crtc(dm_new_crtc_state);
9429
9430 *lock_and_validation_needed = true;
9431
9432 } else { /* Add stream for any updated/enabled CRTC */
9433 /*
9434 * Quick fix to prevent a NULL pointer dereference on new_stream when
9435 * newly added MST connectors are not found in the existing crtc_state
9436 * in chained mode.
9437 * TODO: need to dig out the root cause of that.
9438 */
9439 if (!aconnector)
9440 goto skip_modeset;
9441
9442 if (modereset_required(new_crtc_state))
9443 goto skip_modeset;
9444
9445 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
9446 dm_old_crtc_state->stream)) {
9447
9448 WARN_ON(dm_new_crtc_state->stream);
9449
9450 ret = dm_atomic_get_state(state, &dm_state);
9451 if (ret)
9452 goto fail;
9453
9454 dm_new_crtc_state->stream = new_stream;
9455
9456 dc_stream_retain(new_stream);
9457
9458 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9459 crtc->base.id);
9460
9461 if (dc_add_stream_to_ctx(
9462 dm->dc,
9463 dm_state->context,
9464 dm_new_crtc_state->stream) != DC_OK) {
9465 ret = -EINVAL;
9466 goto fail;
9467 }
9468
9469 *lock_and_validation_needed = true;
9470 }
9471 }
9472
9473 skip_modeset:
9474 /* Release extra reference */
9475 if (new_stream)
9476 dc_stream_release(new_stream);
9477
9478 /*
9479 * We want to do dc stream updates that do not require a
9480 * full modeset below.
9481 */
9482 if (!(enable && aconnector && new_crtc_state->active))
9483 return 0;
9484 /*
9485 * Given the above conditions, the dc state cannot be NULL because:
9486 * 1. We're in the process of enabling CRTCs (the stream has just been
9487 * added to the dc context, or is already on it),
9488 * 2. it has a valid connector attached, and
9489 * 3. it is currently active and enabled.
9490 * => The dc stream state currently exists.
9491 */
9492 BUG_ON(dm_new_crtc_state->stream == NULL);
9493
9494 /* Scaling or underscan settings */
9495 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9496 drm_atomic_crtc_needs_modeset(new_crtc_state))
9497 update_stream_scaling_settings(
9498 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9499
9500 /* ABM settings */
9501 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9502
9503 /*
9504 * Color management settings. We also update color properties
9505 * when a modeset is needed, to ensure it gets reprogrammed.
9506 */
9507 if (dm_new_crtc_state->base.color_mgmt_changed ||
9508 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9509 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9510 if (ret)
9511 goto fail;
9512 }
9513 /* Update Freesync settings.
*/
9514 get_freesync_config_for_crtc(dm_new_crtc_state,
9515 dm_new_conn_state);
9516
9517 return ret;
9518
9519 fail:
9520 if (new_stream)
9521 dc_stream_release(new_stream);
9522 return ret;
9523 }
9524
9525 static bool should_reset_plane(struct drm_atomic_state *state,
9526 struct drm_plane *plane,
9527 struct drm_plane_state *old_plane_state,
9528 struct drm_plane_state *new_plane_state)
9529 {
9530 struct drm_plane *other;
9531 struct drm_plane_state *old_other_state, *new_other_state;
9532 struct drm_crtc_state *new_crtc_state;
9533 struct amdgpu_device *adev = drm_to_adev(plane->dev);
9534 int i;
9535
9536 /*
9537 * TODO: Remove this hack for all ASICs once fast updates are proven
9538 * to work fine on DCN 3.2+.
9539 */
9540 if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
9541 return true;
9542
9543 /* Exit early if we know that we're adding or removing the plane. */
9544 if (old_plane_state->crtc != new_plane_state->crtc)
9545 return true;
9546
9547 /* old crtc == new_crtc == NULL, plane not in context. */
9548 if (!new_plane_state->crtc)
9549 return false;
9550
9551 new_crtc_state =
9552 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9553
9554 if (!new_crtc_state)
9555 return true;
9556
9557 /* CRTC Degamma changes currently require us to recreate planes. */
9558 if (new_crtc_state->color_mgmt_changed)
9559 return true;
9560
9561 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9562 return true;
9563
9564 /*
9565 * If there are any new primary or overlay planes being added or
9566 * removed then the z-order can potentially change. To ensure
9567 * correct z-order and pipe acquisition the current DC architecture
9568 * requires us to remove and recreate all existing planes.
9569 *
9570 * TODO: Come up with a more elegant solution for this.
9571 */
9572 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9573 struct amdgpu_framebuffer *old_afb, *new_afb;
9574
9575 if (other->type == DRM_PLANE_TYPE_CURSOR)
9576 continue;
9577
9578 if (old_other_state->crtc != new_plane_state->crtc &&
9579 new_other_state->crtc != new_plane_state->crtc)
9580 continue;
9581
9582 if (old_other_state->crtc != new_other_state->crtc)
9583 return true;
9584
9585 /* Src/dst size and scaling updates. */
9586 if (old_other_state->src_w != new_other_state->src_w ||
9587 old_other_state->src_h != new_other_state->src_h ||
9588 old_other_state->crtc_w != new_other_state->crtc_w ||
9589 old_other_state->crtc_h != new_other_state->crtc_h)
9590 return true;
9591
9592 /* Rotation / mirroring updates. */
9593 if (old_other_state->rotation != new_other_state->rotation)
9594 return true;
9595
9596 /* Blending updates. */
9597 if (old_other_state->pixel_blend_mode !=
9598 new_other_state->pixel_blend_mode)
9599 return true;
9600
9601 /* Alpha updates. */
9602 if (old_other_state->alpha != new_other_state->alpha)
9603 return true;
9604
9605 /* Colorspace changes. */
9606 if (old_other_state->color_range != new_other_state->color_range ||
9607 old_other_state->color_encoding != new_other_state->color_encoding)
9608 return true;
9609
9610 /* Framebuffer checks fall at the end. */
9611 if (!old_other_state->fb || !new_other_state->fb)
9612 continue;
9613
9614 /* Pixel format changes can require bandwidth updates.
*/ 9615 if (old_other_state->fb->format != new_other_state->fb->format) 9616 return true; 9617 9618 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 9619 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 9620 9621 /* Tiling and DCC changes also require bandwidth updates. */ 9622 if (old_afb->tiling_flags != new_afb->tiling_flags || 9623 old_afb->base.modifier != new_afb->base.modifier) 9624 return true; 9625 } 9626 9627 return false; 9628 } 9629 9630 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 9631 struct drm_plane_state *new_plane_state, 9632 struct drm_framebuffer *fb) 9633 { 9634 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 9635 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 9636 unsigned int pitch; 9637 bool linear; 9638 9639 if (fb->width > new_acrtc->max_cursor_width || 9640 fb->height > new_acrtc->max_cursor_height) { 9641 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 9642 new_plane_state->fb->width, 9643 new_plane_state->fb->height); 9644 return -EINVAL; 9645 } 9646 if (new_plane_state->src_w != fb->width << 16 || 9647 new_plane_state->src_h != fb->height << 16) { 9648 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 9649 return -EINVAL; 9650 } 9651 9652 /* Pitch in pixels */ 9653 pitch = fb->pitches[0] / fb->format->cpp[0]; 9654 9655 if (fb->width != pitch) { 9656 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 9657 fb->width, pitch); 9658 return -EINVAL; 9659 } 9660 9661 switch (pitch) { 9662 case 64: 9663 case 128: 9664 case 256: 9665 /* FB pitch is supported by cursor plane */ 9666 break; 9667 default: 9668 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 9669 return -EINVAL; 9670 } 9671 9672 /* Core DRM takes care of checking FB modifiers, so we only need to 9673 * check tiling flags when the FB doesn't have a modifier. 
9674 */ 9675 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 9676 if (adev->family < AMDGPU_FAMILY_AI) { 9677 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 9678 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 9679 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 9680 } else { 9681 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 9682 } 9683 if (!linear) { 9684 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 9685 return -EINVAL; 9686 } 9687 } 9688 9689 return 0; 9690 } 9691 9692 static int dm_update_plane_state(struct dc *dc, 9693 struct drm_atomic_state *state, 9694 struct drm_plane *plane, 9695 struct drm_plane_state *old_plane_state, 9696 struct drm_plane_state *new_plane_state, 9697 bool enable, 9698 bool *lock_and_validation_needed, 9699 bool *is_top_most_overlay) 9700 { 9701 9702 struct dm_atomic_state *dm_state = NULL; 9703 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 9704 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9705 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 9706 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 9707 struct amdgpu_crtc *new_acrtc; 9708 bool needs_reset; 9709 int ret = 0; 9710 9711 9712 new_plane_crtc = new_plane_state->crtc; 9713 old_plane_crtc = old_plane_state->crtc; 9714 dm_new_plane_state = to_dm_plane_state(new_plane_state); 9715 dm_old_plane_state = to_dm_plane_state(old_plane_state); 9716 9717 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 9718 if (!enable || !new_plane_crtc || 9719 drm_atomic_plane_disabling(plane->state, new_plane_state)) 9720 return 0; 9721 9722 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 9723 9724 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 9725 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 9726 return -EINVAL; 9727 } 9728 9729 if (new_plane_state->fb) { 9730 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 9731 new_plane_state->fb); 9732 if (ret) 9733 return ret; 9734 } 9735 9736 return 0; 9737 } 9738 9739 needs_reset = should_reset_plane(state, plane, old_plane_state, 9740 new_plane_state); 9741 9742 /* Remove any changed/removed planes */ 9743 if (!enable) { 9744 if (!needs_reset) 9745 return 0; 9746 9747 if (!old_plane_crtc) 9748 return 0; 9749 9750 old_crtc_state = drm_atomic_get_old_crtc_state( 9751 state, old_plane_crtc); 9752 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9753 9754 if (!dm_old_crtc_state->stream) 9755 return 0; 9756 9757 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 9758 plane->base.id, old_plane_crtc->base.id); 9759 9760 ret = dm_atomic_get_state(state, &dm_state); 9761 if (ret) 9762 return ret; 9763 9764 if (!dc_remove_plane_from_context( 9765 dc, 9766 dm_old_crtc_state->stream, 9767 dm_old_plane_state->dc_state, 9768 dm_state->context)) { 9769 9770 return -EINVAL; 9771 } 9772 9773 if (dm_old_plane_state->dc_state) 9774 dc_plane_state_release(dm_old_plane_state->dc_state); 9775 9776 dm_new_plane_state->dc_state = NULL; 9777 9778 *lock_and_validation_needed = true; 9779 9780 } else { /* Add new planes */ 9781 struct dc_plane_state *dc_new_plane_state; 9782 9783 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 9784 return 0; 9785 9786 if (!new_plane_crtc) 9787 return 0; 9788 9789 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 9790 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9791 9792 if (!dm_new_crtc_state->stream) 9793 return 0; 9794 9795 if (!needs_reset) 9796 
return 0;
9797
9798 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9799 if (ret)
9800 return ret;
9801
9802 WARN_ON(dm_new_plane_state->dc_state);
9803
9804 dc_new_plane_state = dc_create_plane_state(dc);
9805 if (!dc_new_plane_state)
9806 return -ENOMEM;
9807
9808 /* Block the topmost plane from being a video plane */
9809 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9810 if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) {
/* Release the plane state created above before bailing out */
dc_plane_state_release(dc_new_plane_state);
9811 return -EINVAL;
}
9812
9813 *is_top_most_overlay = false;
9814 }
9815
9816 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9817 plane->base.id, new_plane_crtc->base.id);
9818
9819 ret = fill_dc_plane_attributes(
9820 drm_to_adev(new_plane_crtc->dev),
9821 dc_new_plane_state,
9822 new_plane_state,
9823 new_crtc_state);
9824 if (ret) {
9825 dc_plane_state_release(dc_new_plane_state);
9826 return ret;
9827 }
9828
9829 ret = dm_atomic_get_state(state, &dm_state);
9830 if (ret) {
9831 dc_plane_state_release(dc_new_plane_state);
9832 return ret;
9833 }
9834
9835 /*
9836 * Any atomic check errors that occur after this will
9837 * not need a release. The plane state will be attached
9838 * to the stream, and therefore part of the atomic
9839 * state. It'll be released when the atomic state is
9840 * cleaned.
9841 */
9842 if (!dc_add_plane_to_context(
9843 dc,
9844 dm_new_crtc_state->stream,
9845 dc_new_plane_state,
9846 dm_state->context)) {
9847
9848 dc_plane_state_release(dc_new_plane_state);
9849 return -EINVAL;
9850 }
9851
9852 dm_new_plane_state->dc_state = dc_new_plane_state;
9853
9854 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9855
9856 /* Tell DC to do a full surface update every time there
9857 * is a plane change. Inefficient, but works for now.
9858 */
9859 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9860
9861 *lock_and_validation_needed = true;
9862 }
9863
9864
9865 return ret;
9866 }
9867
9868 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9869 int *src_w, int *src_h)
9870 {
9871 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9872 case DRM_MODE_ROTATE_90:
9873 case DRM_MODE_ROTATE_270:
9874 *src_w = plane_state->src_h >> 16;
9875 *src_h = plane_state->src_w >> 16;
9876 break;
9877 case DRM_MODE_ROTATE_0:
9878 case DRM_MODE_ROTATE_180:
9879 default:
9880 *src_w = plane_state->src_w >> 16;
9881 *src_h = plane_state->src_h >> 16;
9882 break;
9883 }
9884 }
9885
9886 static void
9887 dm_get_plane_scale(struct drm_plane_state *plane_state,
9888 int *out_plane_scale_w, int *out_plane_scale_h)
9889 {
9890 int plane_src_w, plane_src_h;
9891
9892 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
9893 *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
9894 *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
9895 }
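/*
 * Editorial note (hypothetical values, not from the original source):
 * what dm_get_plane_scale() above computes, in units of 1/1000:
 *
 *   cursor:     crtc_w = 64,   src_w = 64   -> 64 * 1000 / 64     = 1000
 *   underlying: crtc_w = 3840, src_w = 1920 -> 3840 * 1000 / 1920 = 2000
 *
 * dm_check_crtc_cursor() below rejects such a commit with -EINVAL,
 * since the cursor inherits the underlying pipe's 2x scaling and would
 * be drawn at twice its intended size.
 */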
9896
9897 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9898 struct drm_crtc *crtc,
9899 struct drm_crtc_state *new_crtc_state)
9900 {
9901 struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
9902 struct drm_plane_state *old_plane_state, *new_plane_state;
9903 struct drm_plane_state *new_cursor_state, *new_underlying_state;
9904 int i;
9905 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9906 bool any_relevant_change = false;
9907
9908 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9909 * cursor per pipe, but it's going to inherit the scaling and
9910 * positioning from the underlying pipe. Check that the cursor plane's
9911 * blending properties match the underlying planes'.
9912 */
9913
9914 /* If no plane was enabled or changed scaling, no need to check again */
9915 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9916 int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
9917
9918 if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
9919 continue;
9920
9921 if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
9922 any_relevant_change = true;
9923 break;
9924 }
9925
9926 if (new_plane_state->fb == old_plane_state->fb &&
9927 new_plane_state->crtc_w == old_plane_state->crtc_w &&
9928 new_plane_state->crtc_h == old_plane_state->crtc_h)
9929 continue;
9930
9931 dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
9932 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
9933
9934 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
9935 any_relevant_change = true;
9936 break;
9937 }
9938 }
9939
9940 if (!any_relevant_change)
9941 return 0;
9942
9943 new_cursor_state = drm_atomic_get_plane_state(state, cursor);
9944 if (IS_ERR(new_cursor_state))
9945 return PTR_ERR(new_cursor_state);
9946
9947 if (!new_cursor_state->fb)
9948 return 0;
9949
9950 dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
9951
9952 /* Need to check all enabled planes, even if this commit doesn't change
9953 * their state
9954 */
9955 i = drm_atomic_add_affected_planes(state, crtc);
9956 if (i)
9957 return i;
9958
9959 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9960 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9961 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9962 continue;
9963
9964 /* Ignore disabled planes */
9965 if (!new_underlying_state->fb)
9966 continue;
9967
9968 dm_get_plane_scale(new_underlying_state,
9969 &underlying_scale_w, &underlying_scale_h);
9970
9971 if (cursor_scale_w != underlying_scale_w ||
9972 cursor_scale_h != underlying_scale_h) {
9973 drm_dbg_atomic(crtc->dev,
9974 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9975 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9976 return -EINVAL;
9977 }
9978
9979 /* If this plane covers the whole CRTC, no need to check planes underneath */
9980 if (new_underlying_state->crtc_x <= 0 &&
9981 new_underlying_state->crtc_y <= 0 &&
9982 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9983 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9984 break;
9985 }
9986
9987 return 0;
9988 }
9989
9990 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9991 {
9992 struct drm_connector *connector;
9993 struct drm_connector_state *conn_state, *old_conn_state;
9994 struct amdgpu_dm_connector *aconnector = NULL;
9995 int i;
9996
9997 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9998 if (!conn_state->crtc)
9999 conn_state = old_conn_state;
10000
10001 if (conn_state->crtc != crtc)
10002 continue;
10003
10004 aconnector = to_amdgpu_dm_connector(connector);
10005 if (!aconnector->mst_output_port || !aconnector->mst_root)
10006 aconnector = NULL;
10007 else
10008 break;
10009 }
10010
10011 if
(!aconnector)
10012 return 0;
10013
10014 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
10015 }
10016
10017 /**
10018 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10019 *
10020 * @dev: The DRM device
10021 * @state: The atomic state to commit
10022 *
10023 * Validate that the given atomic state is programmable by DC into hardware.
10024 * This involves constructing a &struct dc_state reflecting the new hardware
10025 * state we wish to commit, then querying DC to see if it is programmable. It's
10026 * important not to modify the existing DC state. Otherwise, atomic_check
10027 * may unexpectedly commit hardware changes.
10028 *
10029 * When validating the DC state, it's important that the right locks are
10030 * acquired. For the full-update case, which removes/adds/updates streams on
10031 * one CRTC while flipping on another, acquiring the global lock guarantees
10032 * that any such full-update commit will wait for the completion of any
10033 * outstanding flips using DRM's synchronization events.
10034 *
10035 * Note that DM adds the affected connectors for all CRTCs in the state, even
10036 * when that might not seem necessary. This is because DC stream creation
10037 * requires the DC sink, which is tied to the DRM connector state. Cleaning
10038 * this up should be possible but non-trivial - a possible TODO item.
10039 *
10040 * Return: 0 on success, or a negative error code if validation failed.
10041 */
10042 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10043 struct drm_atomic_state *state)
10044 {
10045 struct amdgpu_device *adev = drm_to_adev(dev);
10046 struct dm_atomic_state *dm_state = NULL;
10047 struct dc *dc = adev->dm.dc;
10048 struct drm_connector *connector;
10049 struct drm_connector_state *old_con_state, *new_con_state;
10050 struct drm_crtc *crtc;
10051 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10052 struct drm_plane *plane;
10053 struct drm_plane_state *old_plane_state, *new_plane_state;
10054 enum dc_status status;
10055 int ret, i;
10056 bool lock_and_validation_needed = false;
10057 bool is_top_most_overlay = true;
10058 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10059 struct drm_dp_mst_topology_mgr *mgr;
10060 struct drm_dp_mst_topology_state *mst_state;
10061 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10062
10063 trace_amdgpu_dm_atomic_check_begin(state);
10064
10065 ret = drm_atomic_helper_check_modeset(dev, state);
10066 if (ret) {
10067 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10068 goto fail;
10069 }
10070
10071 /* Check connector changes */
10072 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10073 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10074 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10075
10076 /* Skip connectors that are disabled or already part of a modeset.
*/ 10077 if (!new_con_state->crtc) 10078 continue; 10079 10080 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 10081 if (IS_ERR(new_crtc_state)) { 10082 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); 10083 ret = PTR_ERR(new_crtc_state); 10084 goto fail; 10085 } 10086 10087 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 10088 dm_old_con_state->scaling != dm_new_con_state->scaling) 10089 new_crtc_state->connectors_changed = true; 10090 } 10091 10092 if (dc_resource_is_dsc_encoding_supported(dc)) { 10093 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10094 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10095 ret = add_affected_mst_dsc_crtcs(state, crtc); 10096 if (ret) { 10097 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); 10098 goto fail; 10099 } 10100 } 10101 } 10102 } 10103 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10104 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10105 10106 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 10107 !new_crtc_state->color_mgmt_changed && 10108 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 10109 dm_old_crtc_state->dsc_force_changed == false) 10110 continue; 10111 10112 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 10113 if (ret) { 10114 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); 10115 goto fail; 10116 } 10117 10118 if (!new_crtc_state->enable) 10119 continue; 10120 10121 ret = drm_atomic_add_affected_connectors(state, crtc); 10122 if (ret) { 10123 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); 10124 goto fail; 10125 } 10126 10127 ret = drm_atomic_add_affected_planes(state, crtc); 10128 if (ret) { 10129 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); 10130 goto fail; 10131 } 10132 10133 if (dm_old_crtc_state->dsc_force_changed) 10134 new_crtc_state->mode_changed = true; 10135 } 10136 10137 /* 10138 * Add all primary and overlay planes on the CRTC to the state 10139 * whenever a plane is enabled to maintain correct z-ordering 10140 * and to enable fast surface updates. 10141 */ 10142 drm_for_each_crtc(crtc, dev) { 10143 bool modified = false; 10144 10145 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 10146 if (plane->type == DRM_PLANE_TYPE_CURSOR) 10147 continue; 10148 10149 if (new_plane_state->crtc == crtc || 10150 old_plane_state->crtc == crtc) { 10151 modified = true; 10152 break; 10153 } 10154 } 10155 10156 if (!modified) 10157 continue; 10158 10159 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 10160 if (plane->type == DRM_PLANE_TYPE_CURSOR) 10161 continue; 10162 10163 new_plane_state = 10164 drm_atomic_get_plane_state(state, plane); 10165 10166 if (IS_ERR(new_plane_state)) { 10167 ret = PTR_ERR(new_plane_state); 10168 DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); 10169 goto fail; 10170 } 10171 } 10172 } 10173 10174 /* 10175 * DC consults the zpos (layer_index in DC terminology) to determine the 10176 * hw plane on which to enable the hw cursor (see 10177 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in 10178 * atomic state, so call drm helper to normalize zpos. 
10179 */
10180 ret = drm_atomic_normalize_zpos(dev, state);
10181 if (ret) {
10182 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
10183 goto fail;
10184 }
10185
10186 /* Remove existing planes if they are modified */
10187 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10188 if (old_plane_state->fb && new_plane_state->fb &&
10189 get_mem_type(old_plane_state->fb) !=
10190 get_mem_type(new_plane_state->fb))
10191 lock_and_validation_needed = true;
10192
10193 ret = dm_update_plane_state(dc, state, plane,
10194 old_plane_state,
10195 new_plane_state,
10196 false,
10197 &lock_and_validation_needed,
10198 &is_top_most_overlay);
10199 if (ret) {
10200 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10201 goto fail;
10202 }
10203 }
10204
10205 /* Disable all CRTCs that require a disable */
10206 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10207 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10208 old_crtc_state,
10209 new_crtc_state,
10210 false,
10211 &lock_and_validation_needed);
10212 if (ret) {
10213 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10214 goto fail;
10215 }
10216 }
10217
10218 /* Enable all CRTCs that require an enable */
10219 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10220 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10221 old_crtc_state,
10222 new_crtc_state,
10223 true,
10224 &lock_and_validation_needed);
10225 if (ret) {
10226 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10227 goto fail;
10228 }
10229 }
10230
10231 /* Add new/modified planes */
10232 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10233 ret = dm_update_plane_state(dc, state, plane,
10234 old_plane_state,
10235 new_plane_state,
10236 true,
10237 &lock_and_validation_needed,
10238 &is_top_most_overlay);
10239 if (ret) {
10240 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10241 goto fail;
10242 }
10243 }
10244
10245 if (dc_resource_is_dsc_encoding_supported(dc)) {
10246 ret = pre_validate_dsc(state, &dm_state, vars);
10247 if (ret != 0)
10248 goto fail;
10249 }
10250
10251 /* Run this here since we want to validate the streams we created */
10252 ret = drm_atomic_helper_check_planes(dev, state);
10253 if (ret) {
10254 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
10255 goto fail;
10256 }
10257
10258 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10259 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10260 if (dm_new_crtc_state->mpo_requested)
10261 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
10262 }
10263
10264 /* Check cursor planes scaling */
10265 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10266 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10267 if (ret) {
10268 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
10269 goto fail;
10270 }
10271 }
10272
10273 if (state->legacy_cursor_update) {
10274 /*
10275 * This is a fast cursor update coming from the plane update
10276 * helper, check if it can be done asynchronously for better
10277 * performance.
10278 */
10279 state->async_update =
10280 !drm_atomic_helper_async_check(dev, state);
10281
10282 /*
10283 * Skip the remaining global validation if this is an async
10284 * update. Cursor updates can be done without affecting
10285 * state or bandwidth calcs and this avoids the performance
10286 * penalty of locking the private state object and
10287 * allocating a new dc_state.
10288 */
10289 if (state->async_update)
10290 return 0;
10291 }
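/*
 * Editorial note (a summary of this function's logic, not new
 * behavior): commits that only move the cursor or flip to a
 * framebuffer of identical layout remain "fast" and skip global
 * validation. Adding/removing planes or streams, scaling changes, or
 * moving a framebuffer between memory types set
 * lock_and_validation_needed and take the "full" path through
 * dc_validate_global_state() further below.
 */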
10292
10293 /* Check scaling and underscan changes */
10294 /* TODO: Removed scaling changes validation due to inability to commit
10295 * a new stream into the context without causing a full reset. Need to
10296 * decide how to handle this.
10297 */
10298 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10299 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10300 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10301 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10302
10303 /* Skip any modesets/resets */
10304 if (!acrtc || drm_atomic_crtc_needs_modeset(
10305 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10306 continue;
10307
10308 /* Skip anything that is not a scaling or underscan change */
10309 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10310 continue;
10311
10312 lock_and_validation_needed = true;
10313 }
10314
10315 /* Set the slot info for each mst_state based on the link encoding format */
10316 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10317 struct amdgpu_dm_connector *aconnector;
10318 struct drm_connector *connector;
10319 struct drm_connector_list_iter iter;
10320 u8 link_coding_cap;
10321
10322 drm_connector_list_iter_begin(dev, &iter);
10323 drm_for_each_connector_iter(connector, &iter) {
10324 if (connector->index == mst_state->mgr->conn_base_id) {
10325 aconnector = to_amdgpu_dm_connector(connector);
10326 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10327 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10328
10329 break;
10330 }
10331 }
10332 drm_connector_list_iter_end(&iter);
10333 }
10334
10335 /*
10336 * Streams and planes are reset when there are changes that affect
10337 * bandwidth. Anything that affects bandwidth needs to go through
10338 * DC global validation to ensure that the configuration can be applied
10339 * to hardware.
10340 *
10341 * We currently have to stall out here in atomic_check for outstanding
10342 * commits to finish in this case because our IRQ handlers reference
10343 * DRM state directly - we can end up disabling interrupts too early
10344 * if we don't.
10345 *
10346 * TODO: Remove this stall and drop DM state private objects.
10347 */ 10348 if (lock_and_validation_needed) { 10349 ret = dm_atomic_get_state(state, &dm_state); 10350 if (ret) { 10351 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); 10352 goto fail; 10353 } 10354 10355 ret = do_aquire_global_lock(dev, state); 10356 if (ret) { 10357 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); 10358 goto fail; 10359 } 10360 10361 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 10362 if (ret) { 10363 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 10364 ret = -EINVAL; 10365 goto fail; 10366 } 10367 10368 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 10369 if (ret) { 10370 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); 10371 goto fail; 10372 } 10373 10374 /* 10375 * Perform validation of MST topology in the state: 10376 * We need to perform MST atomic check before calling 10377 * dc_validate_global_state(), or there is a chance 10378 * to get stuck in an infinite loop and hang eventually. 10379 */ 10380 ret = drm_dp_mst_atomic_check(state); 10381 if (ret) { 10382 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); 10383 goto fail; 10384 } 10385 status = dc_validate_global_state(dc, dm_state->context, true); 10386 if (status != DC_OK) { 10387 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", 10388 dc_status_to_str(status), status); 10389 ret = -EINVAL; 10390 goto fail; 10391 } 10392 } else { 10393 /* 10394 * The commit is a fast update. Fast updates shouldn't change 10395 * the DC context, affect global validation, and can have their 10396 * commit work done in parallel with other commits not touching 10397 * the same resource. If we have a new DC context as part of 10398 * the DM atomic state from validation we need to free it and 10399 * retain the existing one instead. 10400 * 10401 * Furthermore, since the DM atomic state only contains the DC 10402 * context and can safely be annulled, we can free the state 10403 * and clear the associated private object now to free 10404 * some memory and avoid a possible use-after-free later. 10405 */ 10406 10407 for (i = 0; i < state->num_private_objs; i++) { 10408 struct drm_private_obj *obj = state->private_objs[i].ptr; 10409 10410 if (obj->funcs == adev->dm.atomic_obj.funcs) { 10411 int j = state->num_private_objs-1; 10412 10413 dm_atomic_destroy_state(obj, 10414 state->private_objs[i].state); 10415 10416 /* If i is not at the end of the array then the 10417 * last element needs to be moved to where i was 10418 * before the array can safely be truncated. 10419 */ 10420 if (i != j) 10421 state->private_objs[i] = 10422 state->private_objs[j]; 10423 10424 state->private_objs[j].ptr = NULL; 10425 state->private_objs[j].state = NULL; 10426 state->private_objs[j].old_state = NULL; 10427 state->private_objs[j].new_state = NULL; 10428 10429 state->num_private_objs = j; 10430 break; 10431 } 10432 } 10433 } 10434 10435 /* Store the overall update type for use later in atomic check. */ 10436 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10437 struct dm_crtc_state *dm_new_crtc_state = 10438 to_dm_crtc_state(new_crtc_state); 10439 10440 /* 10441 * Only allow async flips for fast updates that don't change 10442 * the FB pitch, the DCC state, rotation, etc. 
10443 */
10444 if (new_crtc_state->async_flip && lock_and_validation_needed) {
10445 drm_dbg_atomic(crtc->dev,
10446 "[CRTC:%d:%s] async flips are only supported for fast updates\n",
10447 crtc->base.id, crtc->name);
10448 ret = -EINVAL;
10449 goto fail;
10450 }
10451
10452 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10453 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
10454 }
10455
10456 /* Must have succeeded at this point */
10457 WARN_ON(ret);
10458
10459 trace_amdgpu_dm_atomic_check_finish(state, ret);
10460
10461 return ret;
10462
10463 fail:
10464 if (ret == -EDEADLK)
10465 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10466 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10467 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10468 else
10469 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10470
10471 trace_amdgpu_dm_atomic_check_finish(state, ret);
10472
10473 return ret;
10474 }
10475
10476 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10477 struct amdgpu_dm_connector *amdgpu_dm_connector)
10478 {
10479 u8 dpcd_data;
10480 bool capable = false;
10481
10482 if (amdgpu_dm_connector->dc_link &&
10483 dm_helpers_dp_read_dpcd(
10484 NULL,
10485 amdgpu_dm_connector->dc_link,
10486 DP_DOWN_STREAM_PORT_COUNT,
10487 &dpcd_data,
10488 sizeof(dpcd_data))) {
10489 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10490 }
10491
10492 return capable;
10493 }
10494
10495 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10496 unsigned int offset,
10497 unsigned int total_length,
10498 u8 *data,
10499 unsigned int length,
10500 struct amdgpu_hdmi_vsdb_info *vsdb)
10501 {
10502 bool res;
10503 union dmub_rb_cmd cmd;
10504 struct dmub_cmd_send_edid_cea *input;
10505 struct dmub_cmd_edid_cea_output *output;
10506
10507 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10508 return false;
10509
10510 memset(&cmd, 0, sizeof(cmd));
10511
10512 input = &cmd.edid_cea.data.input;
10513
10514 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10515 cmd.edid_cea.header.sub_type = 0;
10516 cmd.edid_cea.header.payload_bytes =
10517 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10518 input->offset = offset;
10519 input->length = length;
10520 input->cea_total_length = total_length;
10521 memcpy(input->payload, data, length);
10522
10523 res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
10524 if (!res) {
10525 DRM_ERROR("EDID CEA parser failed\n");
10526 return false;
10527 }
10528
10529 output = &cmd.edid_cea.data.output;
10530
10531 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10532 if (!output->ack.success) {
10533 DRM_ERROR("EDID CEA ack failed at offset %d\n",
10534 output->ack.offset);
10535 }
10536 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10537 if (!output->amd_vsdb.vsdb_found)
10538 return false;
10539
10540 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10541 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10542 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10543 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10544 } else {
10545 DRM_WARN("Unknown EDID CEA parser results\n");
10546 return false;
10547 }
10548
10549 return true;
10550 }
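/*
 * Editorial note (not part of the original source): the EDID CEA
 * parsing below streams the 128-byte extension block to firmware in
 * 8-byte chunks, i.e. 128 / 8 = 16 transfers per block. On the legacy
 * DMCU path every chunk is acknowledged individually and the AMD VSDB
 * result is polled after the final chunk; on the DMUB path each
 * dm_edid_parser_send_cea() call above returns either an ack or, for
 * the last chunk, the parsed VSDB.
 */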
10551
10552 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10553 u8 *edid_ext, int len,
10554 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10555 {
10556 int i;
10557
10558 /* Send the extension block to the DMCU for parsing */
10559 for (i = 0; i < len; i += 8) {
10560 bool res;
10561 int offset;
10562
10563 /* Send 8 bytes at a time */
10564 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10565 return false;
10566
10567 if (i+8 == len) {
10568 /* EDID block send completed; expect the result */
10569 int version, min_rate, max_rate;
10570
10571 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10572 if (res) {
10573 /* AMD VSDB found */
10574 vsdb_info->freesync_supported = 1;
10575 vsdb_info->amd_vsdb_version = version;
10576 vsdb_info->min_refresh_rate_hz = min_rate;
10577 vsdb_info->max_refresh_rate_hz = max_rate;
10578 return true;
10579 }
10580 /* Not an AMD VSDB */
10581 return false;
10582 }
10583
10584 /* Check for the ack */
10585 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10586 if (!res)
10587 return false;
10588 }
10589
10590 return false;
10591 }
10592
10593 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10594 u8 *edid_ext, int len,
10595 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10596 {
10597 int i;
10598
10599 /* Send the extension block to the DMUB for parsing */
10600 for (i = 0; i < len; i += 8) {
10601 /* Send 8 bytes at a time */
10602 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10603 return false;
10604 }
10605
10606 return vsdb_info->freesync_supported;
10607 }
10608
10609 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10610 u8 *edid_ext, int len,
10611 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10612 {
10613 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10614 bool ret;
10615
10616 mutex_lock(&adev->dm.dc_lock);
10617 if (adev->dm.dmub_srv)
10618 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10619 else
10620 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10621 mutex_unlock(&adev->dm.dc_lock);
10622 return ret;
10623 }
10624
10625 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10626 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10627 {
10628 u8 *edid_ext = NULL;
10629 int i;
10630 int j = 0;
10631
10632 if (edid == NULL || edid->extensions == 0)
10633 return -ENODEV;
10634
10635 /* Find the DisplayID extension */
10636 for (i = 0; i < edid->extensions; i++) {
10637 edid_ext = (void *)(edid + (i + 1));
10638 if (edid_ext[0] == DISPLAYID_EXT)
10639 break;
10640 }
10641
10642 while (j < EDID_LENGTH) {
10643 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
10644 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
10645
10646 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
10647 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
10648 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ?
true : false;
10649 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
10650 DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
10651
10652 return true;
10653 }
10654 j++;
10655 }
10656
10657 return false;
10658 }
10659
10660 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10661 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10662 {
10663 u8 *edid_ext = NULL;
10664 int i;
10665 bool valid_vsdb_found = false;
10666
10667 /*----- drm_find_cea_extension() -----*/
10668 /* No EDID or EDID extensions */
10669 if (edid == NULL || edid->extensions == 0)
10670 return -ENODEV;
10671
10672 /* Find CEA extension */
10673 for (i = 0; i < edid->extensions; i++) {
10674 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10675 if (edid_ext[0] == CEA_EXT)
10676 break;
10677 }
10678
10679 if (i == edid->extensions)
10680 return -ENODEV;
10681
10682 /*----- cea_db_offsets() -----*/
10683 if (edid_ext[0] != CEA_EXT)
10684 return -ENODEV;
10685
10686 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10687
10688 return valid_vsdb_found ? i : -ENODEV;
10689 }
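/*
 * Editorial note (illustrative values, not from the original source):
 * for the DP/eDP path in amdgpu_dm_update_freesync_caps() below, a
 * monitor range descriptor with flags == 1 advertising 48-144 Hz
 * yields min_vfreq = 48 and max_vfreq = 144; since 144 - 48 > 10, the
 * connector is marked freesync_capable and the VRR property is set.
 */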
/**
 * amdgpu_dm_update_freesync_caps - Update FreeSync capabilities
 *
 * @connector: Connector to query.
 * @edid: EDID from monitor
 *
 * Amdgpu supports FreeSync on both DP and HDMI displays, and it needs to keep
 * track of some of the display information in the internal data struct used by
 * amdgpu_dm. This function checks the connector type and parses the EDID
 * accordingly to derive the FreeSync parameters.
 */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
	bool freesync_capable = false;
	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state\n", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
	       amdgpu_dm_connector->dc_sink :
	       amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
	    sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
							adev->dm.dc,
							amdgpu_dm_connector);
		}

		if (edid_check_required && (edid->version > 1 ||
		    (edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {
				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;

				/* Check if monitor has continuous frequency mode */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;
		}
		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);

		if (vsdb_info.replay_mode) {
			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
		}

	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);

	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
			amdgpu_dm_connector->pack_sdp_v1_3 = true;
			amdgpu_dm_connector->as_type = as_type;
			amdgpu_dm_connector->vsdb_info = vsdb_info;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}
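
/*
 * Worked example for the "> 10" range check above (hypothetical panels):
 * a 48-144 Hz panel yields max_vfreq - min_vfreq == 96 > 10 and is marked
 * freesync_capable, while a fixed 60-60 Hz panel yields 0 and the VRR
 * property stays false. The 10 Hz floor skips ranges too narrow to be
 * useful for variable refresh.
 */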
void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	u32 value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct aux_payload *payload,
		enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct dmub_notification *p_notify = adev->dm.dmub_notify;
	int ret = -1;

	mutex_lock(&adev->dm.dpia_aux_lock);
	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
		goto out;
	}

	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		goto out;
	}

	if (p_notify->result != AUX_RET_SUCCESS) {
		/*
		 * Transient states before tunneling is enabled could
		 * lead to this error. We can ignore this for now.
		 */
		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
				 payload->address, payload->length,
				 p_notify->result);
		}
		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
		goto out;
	}

	payload->reply[0] = p_notify->aux_reply.command;
	if (!payload->write && p_notify->aux_reply.length &&
	    (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {

		if (payload->length != p_notify->aux_reply.length) {
			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
				 p_notify->aux_reply.length,
				 payload->address, payload->length);
			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
			goto out;
		}

		memcpy(payload->data, p_notify->aux_reply.data,
		       p_notify->aux_reply.length);
	}

	/* success */
	ret = p_notify->aux_reply.length;
	*operation_result = p_notify->result;
out:
	reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}
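
/*
 * Caller sketch for the sync-over-async pattern above (hypothetical
 * values, error handling elided): the request is fired asynchronously to
 * the DMUB firmware and the helper blocks on dmub_aux_transfer_done until
 * the reply notification arrives or the 10 s timeout hits.
 *
 *	struct aux_payload payload = {
 *		.address = 0x0,		// DPCD address to read
 *		.length = 16,
 *		.data = buf,
 *	};
 *	enum aux_return_code_type rc;
 *	int n = amdgpu_dm_process_dmub_aux_transfer_sync(ctx, 0, &payload, &rc);
 *	// n >= 0: reply bytes copied into buf; n < 0: inspect rc
 */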
int amdgpu_dm_process_dmub_set_config_sync(
		struct dc_context *ctx,
		unsigned int link_index,
		struct set_config_cmd_payload *payload,
		enum set_config_status *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	bool is_cmd_complete;
	int ret;

	mutex_lock(&adev->dm.dpia_aux_lock);
	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
			link_index, payload, adev->dm.dmub_notify);

	if (is_cmd_complete ||
	    wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
		ret = 0;
		*operation_result = adev->dm.dmub_notify->sc_status;
	} else {
		DRM_ERROR("wait_for_completion_timeout timed out!\n");
		ret = -1;
		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
	}

	if (!is_cmd_complete)
		reinit_completion(&adev->dm.dmub_aux_transfer_done);
	mutex_unlock(&adev->dm.dpia_aux_lock);
	return ret;
}

/*
 * Check whether seamless boot is supported.
 *
 * So far we only support seamless boot on DCN 3.0.1 (Van Gogh).
 * If everything goes well, we may consider expanding
 * seamless boot to other ASICs.
 */
bool check_seamless_boot_capability(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		if (!adev->mman.keep_stolen_vga_memory)
			return true;
		break;
	default:
		break;
	}

	return false;
}

bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
			 enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
}

bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
			      union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
}
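
/*
 * Usage sketch for the DMUB wrappers above (hypothetical command; real
 * payloads live in union dmub_rb_cmd): DC calls these through the
 * dm_services interface to run a command and optionally wait for the
 * result.
 *
 *	union dmub_rb_cmd cmd = {0};
 *	// ... fill in the command header and payload ...
 *	if (!dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT))
 *		DRM_ERROR("DMUB command failed\n");
 */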