1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 /* The caprices of the preprocessor require that this be declared right here */ 27 #define CREATE_TRACE_POINTS 28 29 #include "dm_services_types.h" 30 #include "dc.h" 31 #include "dc_link_dp.h" 32 #include "link_enc_cfg.h" 33 #include "dc/inc/core_types.h" 34 #include "dal_asic_id.h" 35 #include "dmub/dmub_srv.h" 36 #include "dc/inc/hw/dmcu.h" 37 #include "dc/inc/hw/abm.h" 38 #include "dc/dc_dmub_srv.h" 39 #include "dc/dc_edid_parser.h" 40 #include "dc/dc_stat.h" 41 #include "amdgpu_dm_trace.h" 42 43 #include "vid.h" 44 #include "amdgpu.h" 45 #include "amdgpu_display.h" 46 #include "amdgpu_ucode.h" 47 #include "atom.h" 48 #include "amdgpu_dm.h" 49 #ifdef CONFIG_DRM_AMD_DC_HDCP 50 #include "amdgpu_dm_hdcp.h" 51 #include <drm/display/drm_hdcp_helper.h> 52 #endif 53 #include "amdgpu_pm.h" 54 #include "amdgpu_atombios.h" 55 56 #include "amd_shared.h" 57 #include "amdgpu_dm_irq.h" 58 #include "dm_helpers.h" 59 #include "amdgpu_dm_mst_types.h" 60 #if defined(CONFIG_DEBUG_FS) 61 #include "amdgpu_dm_debugfs.h" 62 #endif 63 #include "amdgpu_dm_psr.h" 64 65 #include "ivsrcid/ivsrcid_vislands30.h" 66 67 #include "i2caux_interface.h" 68 #include <linux/module.h> 69 #include <linux/moduleparam.h> 70 #include <linux/types.h> 71 #include <linux/pm_runtime.h> 72 #include <linux/pci.h> 73 #include <linux/firmware.h> 74 #include <linux/component.h> 75 76 #include <drm/display/drm_dp_mst_helper.h> 77 #include <drm/display/drm_hdmi_helper.h> 78 #include <drm/drm_atomic.h> 79 #include <drm/drm_atomic_uapi.h> 80 #include <drm/drm_atomic_helper.h> 81 #include <drm/drm_fb_helper.h> 82 #include <drm/drm_fourcc.h> 83 #include <drm/drm_edid.h> 84 #include <drm/drm_vblank.h> 85 #include <drm/drm_audio_component.h> 86 87 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" 88 89 #include "dcn/dcn_1_0_offset.h" 90 #include "dcn/dcn_1_0_sh_mask.h" 91 #include "soc15_hw_ip.h" 92 #include "soc15_common.h" 93 #include "vega10_ip_offset.h" 94 95 96 97 #include "gc/gc_11_0_0_offset.h" 98 #include "gc/gc_11_0_0_sh_mask.h" 99 100 #include "modules/inc/mod_freesync.h" 101 #include "modules/power/power_helpers.h" 102 #include "modules/inc/mod_info_packet.h" 103 104 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 105 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 106 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 108 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 109 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 110 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 111 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 112 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 113 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 114 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 115 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 116 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 117 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 118 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 119 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 120 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" 121 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); 122 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" 123 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); 124 125 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" 126 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); 127 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" 128 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); 129 130 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 131 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 132 133 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 134 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 135 136 /* Number of bytes in PSP header for firmware. */ 137 #define PSP_HEADER_BYTES 0x100 138 139 /* Number of bytes in PSP footer for firmware. */ 140 #define PSP_FOOTER_BYTES 0x100 141 142 /** 143 * DOC: overview 144 * 145 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 146 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 147 * requests into DC requests, and DC responses into DRM responses. 148 * 149 * The root control structure is &struct amdgpu_display_manager. 150 */ 151 152 /* basic init/fini API */ 153 static int amdgpu_dm_init(struct amdgpu_device *adev); 154 static void amdgpu_dm_fini(struct amdgpu_device *adev); 155 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); 156 157 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) 158 { 159 switch (link->dpcd_caps.dongle_type) { 160 case DISPLAY_DONGLE_NONE: 161 return DRM_MODE_SUBCONNECTOR_Native; 162 case DISPLAY_DONGLE_DP_VGA_CONVERTER: 163 return DRM_MODE_SUBCONNECTOR_VGA; 164 case DISPLAY_DONGLE_DP_DVI_CONVERTER: 165 case DISPLAY_DONGLE_DP_DVI_DONGLE: 166 return DRM_MODE_SUBCONNECTOR_DVID; 167 case DISPLAY_DONGLE_DP_HDMI_CONVERTER: 168 case DISPLAY_DONGLE_DP_HDMI_DONGLE: 169 return DRM_MODE_SUBCONNECTOR_HDMIA; 170 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: 171 default: 172 return DRM_MODE_SUBCONNECTOR_Unknown; 173 } 174 } 175 176 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector) 177 { 178 struct dc_link *link = aconnector->dc_link; 179 struct drm_connector *connector = &aconnector->base; 180 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; 181 182 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 183 return; 184 185 if (aconnector->dc_sink) 186 subconnector = get_subconnector_type(link); 187 188 drm_object_property_set_value(&connector->base, 189 connector->dev->mode_config.dp_subconnector_property, 190 subconnector); 191 } 192 193 /* 194 * initializes drm_device display related structures, based on the information 195 * provided by DAL. 
The drm structures are: drm_crtc, drm_connector, 196 * drm_encoder, drm_mode_config 197 * 198 * Returns 0 on success 199 */ 200 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); 201 /* removes and deallocates the drm structures, created by the above function */ 202 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); 203 204 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 205 struct drm_plane *plane, 206 unsigned long possible_crtcs, 207 const struct dc_plane_cap *plane_cap); 208 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 209 struct drm_plane *plane, 210 uint32_t link_index); 211 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 212 struct amdgpu_dm_connector *amdgpu_dm_connector, 213 uint32_t link_index, 214 struct amdgpu_encoder *amdgpu_encoder); 215 static int amdgpu_dm_encoder_init(struct drm_device *dev, 216 struct amdgpu_encoder *aencoder, 217 uint32_t link_index); 218 219 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); 220 221 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); 222 223 static int amdgpu_dm_atomic_check(struct drm_device *dev, 224 struct drm_atomic_state *state); 225 226 static void handle_cursor_update(struct drm_plane *plane, 227 struct drm_plane_state *old_plane_state); 228 229 static const struct drm_format_info * 230 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); 231 232 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); 233 static void handle_hpd_rx_irq(void *param); 234 235 static bool 236 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 237 struct drm_crtc_state *new_crtc_state); 238 /* 239 * dm_vblank_get_counter 240 * 241 * @brief 242 * Get counter for number of vertical blanks 243 * 244 * @param 245 * struct amdgpu_device *adev - [in] desired amdgpu device 246 * int crtc - [in] which CRTC to get the counter from 247 * 248 * @return 249 * Counter for vertical blanks 250 */ 251 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) 252 { 253 if (crtc >= adev->mode_info.num_crtc) 254 return 0; 255 else { 256 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; 257 258 if (acrtc->dm_irq_params.stream == NULL) { 259 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", 260 crtc); 261 return 0; 262 } 263 264 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); 265 } 266 } 267 268 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 269 u32 *vbl, u32 *position) 270 { 271 uint32_t v_blank_start, v_blank_end, h_position, v_position; 272 273 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 274 return -EINVAL; 275 else { 276 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; 277 278 if (acrtc->dm_irq_params.stream == NULL) { 279 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", 280 crtc); 281 return 0; 282 } 283 284 /* 285 * TODO rework base driver to use values directly.
286 * for now parse it back into reg-format 287 */ 288 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, 289 &v_blank_start, 290 &v_blank_end, 291 &h_position, 292 &v_position); 293 294 *position = v_position | (h_position << 16); 295 *vbl = v_blank_start | (v_blank_end << 16); 296 } 297 298 return 0; 299 } 300 301 static bool dm_is_idle(void *handle) 302 { 303 /* XXX todo */ 304 return true; 305 } 306 307 static int dm_wait_for_idle(void *handle) 308 { 309 /* XXX todo */ 310 return 0; 311 } 312 313 static bool dm_check_soft_reset(void *handle) 314 { 315 return false; 316 } 317 318 static int dm_soft_reset(void *handle) 319 { 320 /* XXX todo */ 321 return 0; 322 } 323 324 static struct amdgpu_crtc * 325 get_crtc_by_otg_inst(struct amdgpu_device *adev, 326 int otg_inst) 327 { 328 struct drm_device *dev = adev_to_drm(adev); 329 struct drm_crtc *crtc; 330 struct amdgpu_crtc *amdgpu_crtc; 331 332 if (WARN_ON(otg_inst == -1)) 333 return adev->mode_info.crtcs[0]; 334 335 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 336 amdgpu_crtc = to_amdgpu_crtc(crtc); 337 338 if (amdgpu_crtc->otg_inst == otg_inst) 339 return amdgpu_crtc; 340 } 341 342 return NULL; 343 } 344 345 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc) 346 { 347 return acrtc->dm_irq_params.freesync_config.state == 348 VRR_STATE_ACTIVE_VARIABLE || 349 acrtc->dm_irq_params.freesync_config.state == 350 VRR_STATE_ACTIVE_FIXED; 351 } 352 353 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) 354 { 355 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || 356 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 357 } 358 359 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, 360 struct dm_crtc_state *new_state) 361 { 362 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) 363 return true; 364 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state)) 365 return true; 366 else 367 return false; 368 } 369 370 /** 371 * dm_pflip_high_irq() - Handle pageflip interrupt 372 * @interrupt_params: ignored 373 * 374 * Handles the pageflip interrupt by notifying all interested parties 375 * that the pageflip has been completed. 376 */ 377 static void dm_pflip_high_irq(void *interrupt_params) 378 { 379 struct amdgpu_crtc *amdgpu_crtc; 380 struct common_irq_params *irq_params = interrupt_params; 381 struct amdgpu_device *adev = irq_params->adev; 382 unsigned long flags; 383 struct drm_pending_vblank_event *e; 384 uint32_t vpos, hpos, v_blank_start, v_blank_end; 385 bool vrr_active; 386 387 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); 388 389 /* IRQ could occur when in initial stage */ 390 /* TODO work and BO cleanup */ 391 if (amdgpu_crtc == NULL) { 392 DC_LOG_PFLIP("CRTC is null, returning.\n"); 393 return; 394 } 395 396 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 397 398 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 399 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", 400 amdgpu_crtc->pflip_status, 401 AMDGPU_FLIP_SUBMITTED, 402 amdgpu_crtc->crtc_id, 403 amdgpu_crtc); 404 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 405 return; 406 } 407 408 /* page flip completed. */ 409 e = amdgpu_crtc->event; 410 amdgpu_crtc->event = NULL; 411 412 WARN_ON(!e); 413 414 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc); 415 416 /* Fixed refresh rate, or VRR scanout position outside front-porch? 
*/ 417 if (!vrr_active || 418 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start, 419 &v_blank_end, &hpos, &vpos) || 420 (vpos < v_blank_start)) { 421 /* Update to correct count and vblank timestamp if racing with 422 * vblank irq. This also updates to the correct vblank timestamp 423 * even in VRR mode, as scanout is past the front-porch atm. 424 */ 425 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); 426 427 /* Wake up userspace by sending the pageflip event with proper 428 * count and timestamp of vblank of flip completion. 429 */ 430 if (e) { 431 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); 432 433 /* Event sent, so done with vblank for this flip */ 434 drm_crtc_vblank_put(&amdgpu_crtc->base); 435 } 436 } else if (e) { 437 /* VRR active and inside front-porch: vblank count and 438 * timestamp for pageflip event will only be up to date after 439 * drm_crtc_handle_vblank() has been executed from late vblank 440 * irq handler after start of back-porch (vline 0). We queue the 441 * pageflip event for send-out by drm_crtc_handle_vblank() with 442 * updated timestamp and count, once it runs after us. 443 * 444 * We need to open-code this instead of using the helper 445 * drm_crtc_arm_vblank_event(), as that helper would 446 * call drm_crtc_accurate_vblank_count(), which we must 447 * not call in VRR mode while we are in front-porch! 448 */ 449 450 /* sequence will be replaced by real count during send-out. */ 451 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); 452 e->pipe = amdgpu_crtc->crtc_id; 453 454 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); 455 e = NULL; 456 } 457 458 /* Keep track of vblank of this flip for flip throttling. We use the 459 * cooked hw counter, as that one incremented at start of this vblank 460 * of pageflip completion, so last_flip_vblank is the forbidden count 461 * for queueing new pageflips if vsync + VRR is enabled. 
462 */ 463 amdgpu_crtc->dm_irq_params.last_flip_vblank = 464 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); 465 466 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 467 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 468 469 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", 470 amdgpu_crtc->crtc_id, amdgpu_crtc, 471 vrr_active, (int) !e); 472 } 473 474 static void dm_vupdate_high_irq(void *interrupt_params) 475 { 476 struct common_irq_params *irq_params = interrupt_params; 477 struct amdgpu_device *adev = irq_params->adev; 478 struct amdgpu_crtc *acrtc; 479 struct drm_device *drm_dev; 480 struct drm_vblank_crtc *vblank; 481 ktime_t frame_duration_ns, previous_timestamp; 482 unsigned long flags; 483 int vrr_active; 484 485 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); 486 487 if (acrtc) { 488 vrr_active = amdgpu_dm_vrr_active_irq(acrtc); 489 drm_dev = acrtc->base.dev; 490 vblank = &drm_dev->vblank[acrtc->base.index]; 491 previous_timestamp = atomic64_read(&irq_params->previous_timestamp); 492 frame_duration_ns = vblank->time - previous_timestamp; 493 494 if (frame_duration_ns > 0) { 495 trace_amdgpu_refresh_rate_track(acrtc->base.index, 496 frame_duration_ns, 497 ktime_divns(NSEC_PER_SEC, frame_duration_ns)); 498 atomic64_set(&irq_params->previous_timestamp, vblank->time); 499 } 500 501 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", 502 acrtc->crtc_id, 503 vrr_active); 504 505 /* Core vblank handling is done here after end of front-porch in 506 * vrr mode, as vblank timestamping will give valid results 507 * while now done after front-porch. This will also deliver 508 * page-flip completion events that have been queued to us 509 * if a pageflip happened inside front-porch. 510 */ 511 if (vrr_active) { 512 drm_crtc_handle_vblank(&acrtc->base); 513 514 /* BTR processing for pre-DCE12 ASICs */ 515 if (acrtc->dm_irq_params.stream && 516 adev->family < AMDGPU_FAMILY_AI) { 517 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 518 mod_freesync_handle_v_update( 519 adev->dm.freesync_module, 520 acrtc->dm_irq_params.stream, 521 &acrtc->dm_irq_params.vrr_params); 522 523 dc_stream_adjust_vmin_vmax( 524 adev->dm.dc, 525 acrtc->dm_irq_params.stream, 526 &acrtc->dm_irq_params.vrr_params.adjust); 527 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 528 } 529 } 530 } 531 } 532 533 /** 534 * dm_crtc_high_irq() - Handles CRTC interrupt 535 * @interrupt_params: used for determining the CRTC instance 536 * 537 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK 538 * event handler. 539 */ 540 static void dm_crtc_high_irq(void *interrupt_params) 541 { 542 struct common_irq_params *irq_params = interrupt_params; 543 struct amdgpu_device *adev = irq_params->adev; 544 struct amdgpu_crtc *acrtc; 545 unsigned long flags; 546 int vrr_active; 547 548 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); 549 if (!acrtc) 550 return; 551 552 vrr_active = amdgpu_dm_vrr_active_irq(acrtc); 553 554 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, 555 vrr_active, acrtc->dm_irq_params.active_planes); 556 557 /** 558 * Core vblank handling at start of front-porch is only possible 559 * in non-vrr mode, as only there vblank timestamping will give 560 * valid results while done in front-porch. Otherwise defer it 561 * to dm_vupdate_high_irq after end of front-porch.
562 */ 563 if (!vrr_active) 564 drm_crtc_handle_vblank(&acrtc->base); 565 566 /** 567 * Following stuff must happen at start of vblank, for crc 568 * computation and below-the-range btr support in vrr mode. 569 */ 570 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); 571 572 /* BTR updates need to happen before VUPDATE on Vega and above. */ 573 if (adev->family < AMDGPU_FAMILY_AI) 574 return; 575 576 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 577 578 if (acrtc->dm_irq_params.stream && 579 acrtc->dm_irq_params.vrr_params.supported && 580 acrtc->dm_irq_params.freesync_config.state == 581 VRR_STATE_ACTIVE_VARIABLE) { 582 mod_freesync_handle_v_update(adev->dm.freesync_module, 583 acrtc->dm_irq_params.stream, 584 &acrtc->dm_irq_params.vrr_params); 585 586 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, 587 &acrtc->dm_irq_params.vrr_params.adjust); 588 } 589 590 /* 591 * If there aren't any active_planes then DCH HUBP may be clock-gated. 592 * In that case, pageflip completion interrupts won't fire and pageflip 593 * completion events won't get delivered. Prevent this by sending 594 * pending pageflip events from here if a flip is still pending. 595 * 596 * If any planes are enabled, use dm_pflip_high_irq() instead, to 597 * avoid race conditions between flip programming and completion, 598 * which could cause too early flip completion events. 599 */ 600 if (adev->family >= AMDGPU_FAMILY_RV && 601 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && 602 acrtc->dm_irq_params.active_planes == 0) { 603 if (acrtc->event) { 604 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); 605 acrtc->event = NULL; 606 drm_crtc_vblank_put(&acrtc->base); 607 } 608 acrtc->pflip_status = AMDGPU_FLIP_NONE; 609 } 610 611 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 612 } 613 614 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 615 /** 616 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for 617 * DCN generation ASICs 618 * @interrupt_params: interrupt parameters 619 * 620 * Used to set crc window/read out crc value at vertical line 0 position 621 */ 622 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) 623 { 624 struct common_irq_params *irq_params = interrupt_params; 625 struct amdgpu_device *adev = irq_params->adev; 626 struct amdgpu_crtc *acrtc; 627 628 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); 629 630 if (!acrtc) 631 return; 632 633 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); 634 } 635 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ 636 637 /** 638 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. 639 * @adev: amdgpu_device pointer 640 * @notify: dmub notification structure 641 * 642 * Dmub AUX or SET_CONFIG command completion processing callback 643 * Copies dmub notification to DM which is to be read by AUX command. 644 * issuing thread and also signals the event to wake up the thread. 645 */ 646 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, 647 struct dmub_notification *notify) 648 { 649 if (adev->dm.dmub_notify) 650 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); 651 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) 652 complete(&adev->dm.dmub_aux_transfer_done); 653 } 654 655 /** 656 * dmub_hpd_callback - DMUB HPD interrupt processing callback. 657 * @adev: amdgpu_device pointer 658 * @notify: dmub notification structure 659 * 660 * Dmub Hpd interrupt processing callback. 
Gets the display index through the 661 * link index and calls helper to do the processing. 662 */ 663 static void dmub_hpd_callback(struct amdgpu_device *adev, 664 struct dmub_notification *notify) 665 { 666 struct amdgpu_dm_connector *aconnector; 667 struct amdgpu_dm_connector *hpd_aconnector = NULL; 668 struct drm_connector *connector; 669 struct drm_connector_list_iter iter; 670 struct dc_link *link; 671 uint8_t link_index = 0; 672 struct drm_device *dev; 673 674 if (adev == NULL) 675 return; 676 677 if (notify == NULL) { 678 DRM_ERROR("DMUB HPD callback notification was NULL"); 679 return; 680 } 681 682 if (notify->link_index > adev->dm.dc->link_count) { 683 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index); 684 return; 685 } 686 687 link_index = notify->link_index; 688 link = adev->dm.dc->links[link_index]; 689 dev = adev->dm.ddev; 690 691 drm_connector_list_iter_begin(dev, &iter); 692 drm_for_each_connector_iter(connector, &iter) { 693 aconnector = to_amdgpu_dm_connector(connector); 694 if (link && aconnector->dc_link == link) { 695 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); 696 hpd_aconnector = aconnector; 697 break; 698 } 699 } 700 drm_connector_list_iter_end(&iter); 701 702 if (hpd_aconnector) { 703 if (notify->type == DMUB_NOTIFICATION_HPD) 704 handle_hpd_irq_helper(hpd_aconnector); 705 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) 706 handle_hpd_rx_irq(hpd_aconnector); 707 } 708 } 709 710 /** 711 * register_dmub_notify_callback - Sets callback for DMUB notify 712 * @adev: amdgpu_device pointer 713 * @type: Type of dmub notification 714 * @callback: Dmub interrupt callback function 715 * @dmub_int_thread_offload: offload indicator 716 * 717 * API to register a dmub callback handler for a dmub notification 718 * Also sets indicator whether callback processing is to be offloaded 719 * to dmub interrupt handling thread 720 * Return: true if successfully registered, false if there is existing registration 721 */ 722 static bool register_dmub_notify_callback(struct amdgpu_device *adev, 723 enum dmub_notification_type type, 724 dmub_notify_interrupt_callback_t callback, 725 bool dmub_int_thread_offload) 726 { 727 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { 728 adev->dm.dmub_callback[type] = callback; 729 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; 730 } else 731 return false; 732 733 return true; 734 } 735 736 static void dm_handle_hpd_work(struct work_struct *work) 737 { 738 struct dmub_hpd_work *dmub_hpd_wrk; 739 740 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); 741 742 if (!dmub_hpd_wrk->dmub_notify) { 743 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); 744 return; 745 } 746 747 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { 748 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, 749 dmub_hpd_wrk->dmub_notify); 750 } 751 752 kfree(dmub_hpd_wrk->dmub_notify); 753 kfree(dmub_hpd_wrk); 754 755 } 756 757 #define DMUB_TRACE_MAX_READ 64 758 /** 759 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt 760 * @interrupt_params: used for determining the Outbox instance 761 * 762 * Handles the Outbox Interrupt 763 * event handler.
764 */ 765 static void dm_dmub_outbox1_low_irq(void *interrupt_params) 766 { 767 struct dmub_notification notify; 768 struct common_irq_params *irq_params = interrupt_params; 769 struct amdgpu_device *adev = irq_params->adev; 770 struct amdgpu_display_manager *dm = &adev->dm; 771 struct dmcub_trace_buf_entry entry = { 0 }; 772 uint32_t count = 0; 773 struct dmub_hpd_work *dmub_hpd_wrk; 774 struct dc_link *plink = NULL; 775 776 if (dc_enable_dmub_notifications(adev->dm.dc) && 777 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { 778 779 do { 780 dc_stat_get_dmub_notification(adev->dm.dc, &notify); 781 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { 782 DRM_ERROR("DM: notify type %d invalid!", notify.type); 783 continue; 784 } 785 if (!dm->dmub_callback[notify.type]) { 786 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); 787 continue; 788 } 789 if (dm->dmub_thread_offload[notify.type] == true) { 790 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); 791 if (!dmub_hpd_wrk) { 792 DRM_ERROR("Failed to allocate dmub_hpd_wrk"); 793 return; 794 } 795 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC); 796 if (!dmub_hpd_wrk->dmub_notify) { 797 kfree(dmub_hpd_wrk); 798 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); 799 return; 800 } 801 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); 802 if (dmub_hpd_wrk->dmub_notify) 803 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification)); 804 dmub_hpd_wrk->adev = adev; 805 if (notify.type == DMUB_NOTIFICATION_HPD) { 806 plink = adev->dm.dc->links[notify.link_index]; 807 if (plink) { 808 plink->hpd_status = 809 notify.hpd_status == DP_HPD_PLUG; 810 } 811 } 812 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); 813 } else { 814 dm->dmub_callback[notify.type](adev, &notify); 815 } 816 } while (notify.pending_notification); 817 } 818 819 820 do { 821 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { 822 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, 823 entry.param0, entry.param1); 824 825 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", 826 entry.trace_code, entry.tick_count, entry.param0, entry.param1); 827 } else 828 break; 829 830 count++; 831 832 } while (count <= DMUB_TRACE_MAX_READ); 833 834 if (count > DMUB_TRACE_MAX_READ) 835 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); 836 } 837 838 static int dm_set_clockgating_state(void *handle, 839 enum amd_clockgating_state state) 840 { 841 return 0; 842 } 843 844 static int dm_set_powergating_state(void *handle, 845 enum amd_powergating_state state) 846 { 847 return 0; 848 } 849 850 /* Prototypes of private functions */ 851 static int dm_early_init(void* handle); 852 853 /* Allocate memory for FBC compressed data */ 854 static void amdgpu_dm_fbc_init(struct drm_connector *connector) 855 { 856 struct drm_device *dev = connector->dev; 857 struct amdgpu_device *adev = drm_to_adev(dev); 858 struct dm_compressor_info *compressor = &adev->dm.compressor; 859 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); 860 struct drm_display_mode *mode; 861 unsigned long max_size = 0; 862 863 if (adev->dm.dc->fbc_compressor == NULL) 864 return; 865 866 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) 867 return; 868 869 if (compressor->bo_ptr) 870 return; 871 872 873 list_for_each_entry(mode, &connector->modes, head) { 874 if (max_size < mode->htotal * mode->vtotal) 875 max_size = mode->htotal *
mode->vtotal; 876 } 877 878 if (max_size) { 879 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE, 880 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr, 881 &compressor->gpu_addr, &compressor->cpu_addr); 882 883 if (r) 884 DRM_ERROR("DM: Failed to initialize FBC\n"); 885 else { 886 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; 887 DRM_INFO("DM: FBC alloc %lu\n", max_size*4); 888 } 889 890 } 891 892 } 893 894 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, 895 int pipe, bool *enabled, 896 unsigned char *buf, int max_bytes) 897 { 898 struct drm_device *dev = dev_get_drvdata(kdev); 899 struct amdgpu_device *adev = drm_to_adev(dev); 900 struct drm_connector *connector; 901 struct drm_connector_list_iter conn_iter; 902 struct amdgpu_dm_connector *aconnector; 903 int ret = 0; 904 905 *enabled = false; 906 907 mutex_lock(&adev->dm.audio_lock); 908 909 drm_connector_list_iter_begin(dev, &conn_iter); 910 drm_for_each_connector_iter(connector, &conn_iter) { 911 aconnector = to_amdgpu_dm_connector(connector); 912 if (aconnector->audio_inst != port) 913 continue; 914 915 *enabled = true; 916 ret = drm_eld_size(connector->eld); 917 memcpy(buf, connector->eld, min(max_bytes, ret)); 918 919 break; 920 } 921 drm_connector_list_iter_end(&conn_iter); 922 923 mutex_unlock(&adev->dm.audio_lock); 924 925 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); 926 927 return ret; 928 } 929 930 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { 931 .get_eld = amdgpu_dm_audio_component_get_eld, 932 }; 933 934 static int amdgpu_dm_audio_component_bind(struct device *kdev, 935 struct device *hda_kdev, void *data) 936 { 937 struct drm_device *dev = dev_get_drvdata(kdev); 938 struct amdgpu_device *adev = drm_to_adev(dev); 939 struct drm_audio_component *acomp = data; 940 941 acomp->ops = &amdgpu_dm_audio_component_ops; 942 acomp->dev = kdev; 943 adev->dm.audio_component = acomp; 944 945 return 0; 946 } 947 948 static void amdgpu_dm_audio_component_unbind(struct device *kdev, 949 struct device *hda_kdev, void *data) 950 { 951 struct drm_device *dev = dev_get_drvdata(kdev); 952 struct amdgpu_device *adev = drm_to_adev(dev); 953 struct drm_audio_component *acomp = data; 954 955 acomp->ops = NULL; 956 acomp->dev = NULL; 957 adev->dm.audio_component = NULL; 958 } 959 960 static const struct component_ops amdgpu_dm_audio_component_bind_ops = { 961 .bind = amdgpu_dm_audio_component_bind, 962 .unbind = amdgpu_dm_audio_component_unbind, 963 }; 964 965 static int amdgpu_dm_audio_init(struct amdgpu_device *adev) 966 { 967 int i, ret; 968 969 if (!amdgpu_audio) 970 return 0; 971 972 adev->mode_info.audio.enabled = true; 973 974 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; 975 976 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 977 adev->mode_info.audio.pin[i].channels = -1; 978 adev->mode_info.audio.pin[i].rate = -1; 979 adev->mode_info.audio.pin[i].bits_per_sample = -1; 980 adev->mode_info.audio.pin[i].status_bits = 0; 981 adev->mode_info.audio.pin[i].category_code = 0; 982 adev->mode_info.audio.pin[i].connected = false; 983 adev->mode_info.audio.pin[i].id = 984 adev->dm.dc->res_pool->audios[i]->inst; 985 adev->mode_info.audio.pin[i].offset = 0; 986 } 987 988 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); 989 if (ret < 0) 990 return ret; 991 992 adev->dm.audio_registered = true; 993 994 return 0; 995 } 996 997 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) 998 { 999 if 
(!amdgpu_audio) 1000 return; 1001 1002 if (!adev->mode_info.audio.enabled) 1003 return; 1004 1005 if (adev->dm.audio_registered) { 1006 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); 1007 adev->dm.audio_registered = false; 1008 } 1009 1010 /* TODO: Disable audio? */ 1011 1012 adev->mode_info.audio.enabled = false; 1013 } 1014 1015 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) 1016 { 1017 struct drm_audio_component *acomp = adev->dm.audio_component; 1018 1019 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { 1020 DRM_DEBUG_KMS("Notify ELD: %d\n", pin); 1021 1022 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, 1023 pin, -1); 1024 } 1025 } 1026 1027 static int dm_dmub_hw_init(struct amdgpu_device *adev) 1028 { 1029 const struct dmcub_firmware_header_v1_0 *hdr; 1030 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 1031 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 1032 const struct firmware *dmub_fw = adev->dm.dmub_fw; 1033 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; 1034 struct abm *abm = adev->dm.dc->res_pool->abm; 1035 struct dmub_srv_hw_params hw_params; 1036 enum dmub_status status; 1037 const unsigned char *fw_inst_const, *fw_bss_data; 1038 uint32_t i, fw_inst_const_size, fw_bss_data_size; 1039 bool has_hw_support; 1040 1041 if (!dmub_srv) 1042 /* DMUB isn't supported on the ASIC. */ 1043 return 0; 1044 1045 if (!fb_info) { 1046 DRM_ERROR("No framebuffer info for DMUB service.\n"); 1047 return -EINVAL; 1048 } 1049 1050 if (!dmub_fw) { 1051 /* Firmware required for DMUB support. */ 1052 DRM_ERROR("No firmware provided for DMUB.\n"); 1053 return -EINVAL; 1054 } 1055 1056 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); 1057 if (status != DMUB_STATUS_OK) { 1058 DRM_ERROR("Error checking HW support for DMUB: %d\n", status); 1059 return -EINVAL; 1060 } 1061 1062 if (!has_hw_support) { 1063 DRM_INFO("DMUB unsupported on ASIC\n"); 1064 return 0; 1065 } 1066 1067 /* Reset DMCUB if it was previously running - before we overwrite its memory. */ 1068 status = dmub_srv_hw_reset(dmub_srv); 1069 if (status != DMUB_STATUS_OK) 1070 DRM_WARN("Error resetting DMUB HW: %d\n", status); 1071 1072 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; 1073 1074 fw_inst_const = dmub_fw->data + 1075 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1076 PSP_HEADER_BYTES; 1077 1078 fw_bss_data = dmub_fw->data + 1079 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1080 le32_to_cpu(hdr->inst_const_bytes); 1081 1082 /* Copy firmware and bios info into FB memory. */ 1083 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 1084 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 1085 1086 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 1087 1088 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, 1089 * amdgpu_ucode_init_single_fw will load dmub firmware 1090 * fw_inst_const part to cw0; otherwise, the firmware back door load 1091 * will be done by dm_dmub_hw_init 1092 */ 1093 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1094 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, 1095 fw_inst_const_size); 1096 } 1097 1098 if (fw_bss_data_size) 1099 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, 1100 fw_bss_data, fw_bss_data_size); 1101 1102 /* Copy firmware bios info into FB memory. */ 1103 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, 1104 adev->bios_size); 1105 1106 /* Reset regions that need to be reset. 
*/ 1107 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, 1108 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); 1109 1110 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, 1111 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); 1112 1113 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, 1114 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); 1115 1116 /* Initialize hardware. */ 1117 memset(&hw_params, 0, sizeof(hw_params)); 1118 hw_params.fb_base = adev->gmc.fb_start; 1119 hw_params.fb_offset = adev->gmc.aper_base; 1120 1121 /* backdoor load firmware and trigger dmub running */ 1122 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 1123 hw_params.load_inst_const = true; 1124 1125 if (dmcu) 1126 hw_params.psp_version = dmcu->psp_version; 1127 1128 for (i = 0; i < fb_info->num_fb; ++i) 1129 hw_params.fb[i] = &fb_info->fb[i]; 1130 1131 switch (adev->ip_versions[DCE_HWIP][0]) { 1132 case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */ 1133 hw_params.dpia_supported = true; 1134 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; 1135 break; 1136 default: 1137 break; 1138 } 1139 1140 status = dmub_srv_hw_init(dmub_srv, &hw_params); 1141 if (status != DMUB_STATUS_OK) { 1142 DRM_ERROR("Error initializing DMUB HW: %d\n", status); 1143 return -EINVAL; 1144 } 1145 1146 /* Wait for firmware load to finish. */ 1147 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); 1148 if (status != DMUB_STATUS_OK) 1149 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); 1150 1151 /* Init DMCU and ABM if available. */ 1152 if (dmcu && abm) { 1153 dmcu->funcs->dmcu_init(dmcu); 1154 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); 1155 } 1156 1157 if (!adev->dm.dc->ctx->dmub_srv) 1158 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); 1159 if (!adev->dm.dc->ctx->dmub_srv) { 1160 DRM_ERROR("Couldn't allocate DC DMUB server!\n"); 1161 return -ENOMEM; 1162 } 1163 1164 DRM_INFO("DMUB hardware initialized: version=0x%08X\n", 1165 adev->dm.dmcub_fw_version); 1166 1167 return 0; 1168 } 1169 1170 static void dm_dmub_hw_resume(struct amdgpu_device *adev) 1171 { 1172 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 1173 enum dmub_status status; 1174 bool init; 1175 1176 if (!dmub_srv) { 1177 /* DMUB isn't supported on the ASIC. */ 1178 return; 1179 } 1180 1181 status = dmub_srv_is_hw_init(dmub_srv, &init); 1182 if (status != DMUB_STATUS_OK) 1183 DRM_WARN("DMUB hardware init check failed: %d\n", status); 1184 1185 if (status == DMUB_STATUS_OK && init) { 1186 /* Wait for firmware load to finish. */ 1187 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); 1188 if (status != DMUB_STATUS_OK) 1189 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); 1190 } else { 1191 /* Perform the full hardware initialization. */ 1192 dm_dmub_hw_init(adev); 1193 } 1194 } 1195 1196 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) 1197 { 1198 uint64_t pt_base; 1199 uint32_t logical_addr_low; 1200 uint32_t logical_addr_high; 1201 uint32_t agp_base, agp_bot, agp_top; 1202 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; 1203 1204 memset(pa_config, 0, sizeof(*pa_config)); 1205 1206 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; 1207 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); 1208 1209 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 1210 /* 1211 * Raven2 has a HW issue that it is unable to use the vram which 1212 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. 
So here is the 1213 * workaround that increases the system aperture high address (add 1) 1214 * to get rid of the VM fault and hardware hang. 1215 */ 1216 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); 1217 else 1218 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; 1219 1220 agp_base = 0; 1221 agp_bot = adev->gmc.agp_start >> 24; 1222 agp_top = adev->gmc.agp_end >> 24; 1223 1224 1225 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF; 1226 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); 1227 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF; 1228 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12); 1229 page_table_base.high_part = upper_32_bits(pt_base) & 0xF; 1230 page_table_base.low_part = lower_32_bits(pt_base); 1231 1232 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; 1233 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; 1234 1235 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; 1236 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; 1237 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; 1238 1239 pa_config->system_aperture.fb_base = adev->gmc.fb_start; 1240 pa_config->system_aperture.fb_offset = adev->gmc.aper_base; 1241 pa_config->system_aperture.fb_top = adev->gmc.fb_end; 1242 1243 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; 1244 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; 1245 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; 1246 1247 pa_config->is_hvm_enabled = 0; 1248 1249 } 1250 1251 static void vblank_control_worker(struct work_struct *work) 1252 { 1253 struct vblank_control_work *vblank_work = 1254 container_of(work, struct vblank_control_work, work); 1255 struct amdgpu_display_manager *dm = vblank_work->dm; 1256 1257 mutex_lock(&dm->dc_lock); 1258 1259 if (vblank_work->enable) 1260 dm->active_vblank_irq_count++; 1261 else if (dm->active_vblank_irq_count) 1262 dm->active_vblank_irq_count--; 1263 1264 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); 1265 1266 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); 1267 1268 /* 1269 * Control PSR based on vblank requirements from OS 1270 * 1271 * If panel supports PSR SU, there's no need to disable PSR when OS is 1272 * submitting fast atomic commits (we infer this by whether the OS 1273 * requests vblank events). Fast atomic commits will simply trigger a 1274 * full-frame-update (FFU); a specific case of selective-update (SU) 1275 * where the SU region is the full hactive*vactive region. See 1276 * fill_dc_dirty_rects().
1277 */ 1278 if (vblank_work->stream && vblank_work->stream->link) { 1279 if (vblank_work->enable) { 1280 if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && 1281 vblank_work->stream->link->psr_settings.psr_allow_active) 1282 amdgpu_dm_psr_disable(vblank_work->stream); 1283 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && 1284 !vblank_work->stream->link->psr_settings.psr_allow_active && 1285 vblank_work->acrtc->dm_irq_params.allow_psr_entry) { 1286 amdgpu_dm_psr_enable(vblank_work->stream); 1287 } 1288 } 1289 1290 mutex_unlock(&dm->dc_lock); 1291 1292 dc_stream_release(vblank_work->stream); 1293 1294 kfree(vblank_work); 1295 } 1296 1297 static void dm_handle_hpd_rx_offload_work(struct work_struct *work) 1298 { 1299 struct hpd_rx_irq_offload_work *offload_work; 1300 struct amdgpu_dm_connector *aconnector; 1301 struct dc_link *dc_link; 1302 struct amdgpu_device *adev; 1303 enum dc_connection_type new_connection_type = dc_connection_none; 1304 unsigned long flags; 1305 1306 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); 1307 aconnector = offload_work->offload_wq->aconnector; 1308 1309 if (!aconnector) { 1310 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); 1311 goto skip; 1312 } 1313 1314 adev = drm_to_adev(aconnector->base.dev); 1315 dc_link = aconnector->dc_link; 1316 1317 mutex_lock(&aconnector->hpd_lock); 1318 if (!dc_link_detect_sink(dc_link, &new_connection_type)) 1319 DRM_ERROR("KMS: Failed to detect connector\n"); 1320 mutex_unlock(&aconnector->hpd_lock); 1321 1322 if (new_connection_type == dc_connection_none) 1323 goto skip; 1324 1325 if (amdgpu_in_reset(adev)) 1326 goto skip; 1327 1328 mutex_lock(&adev->dm.dc_lock); 1329 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) 1330 dc_link_dp_handle_automated_test(dc_link); 1331 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && 1332 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && 1333 dc_link_dp_allow_hpd_rx_irq(dc_link)) { 1334 dc_link_dp_handle_link_loss(dc_link); 1335 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); 1336 offload_work->offload_wq->is_handling_link_loss = false; 1337 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); 1338 } 1339 mutex_unlock(&adev->dm.dc_lock); 1340 1341 skip: 1342 kfree(offload_work); 1343 1344 } 1345 1346 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) 1347 { 1348 int max_caps = dc->caps.max_links; 1349 int i = 0; 1350 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; 1351 1352 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); 1353 1354 if (!hpd_rx_offload_wq) 1355 return NULL; 1356 1357 1358 for (i = 0; i < max_caps; i++) { 1359 hpd_rx_offload_wq[i].wq = 1360 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); 1361 1362 if (hpd_rx_offload_wq[i].wq == NULL) { 1363 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); 1364 return NULL; 1365 } 1366 1367 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); 1368 } 1369 1370 return hpd_rx_offload_wq; 1371 } 1372 1373 struct amdgpu_stutter_quirk { 1374 u16 chip_vendor; 1375 u16 chip_device; 1376 u16 subsys_vendor; 1377 u16 subsys_device; 1378 u8 revision; 1379 }; 1380 1381 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { 1382 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ 1383 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, 1384 { 0, 0, 0, 0, 0 }, 1385 }; 
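/* Match the PCI vendor/device/subsystem/revision of the attached board against amdgpu_stutter_quirk_list above; amdgpu_dm_init() uses this to force-disable memory stutter mode on the quirked systems. */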
1386 1387 static bool dm_should_disable_stutter(struct pci_dev *pdev) 1388 { 1389 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; 1390 1391 while (p && p->chip_device != 0) { 1392 if (pdev->vendor == p->chip_vendor && 1393 pdev->device == p->chip_device && 1394 pdev->subsystem_vendor == p->subsys_vendor && 1395 pdev->subsystem_device == p->subsys_device && 1396 pdev->revision == p->revision) { 1397 return true; 1398 } 1399 ++p; 1400 } 1401 return false; 1402 } 1403 1404 static int amdgpu_dm_init(struct amdgpu_device *adev) 1405 { 1406 struct dc_init_data init_data; 1407 #ifdef CONFIG_DRM_AMD_DC_HDCP 1408 struct dc_callback_init init_params; 1409 #endif 1410 int r; 1411 1412 adev->dm.ddev = adev_to_drm(adev); 1413 adev->dm.adev = adev; 1414 1415 /* Zero all the fields */ 1416 memset(&init_data, 0, sizeof(init_data)); 1417 #ifdef CONFIG_DRM_AMD_DC_HDCP 1418 memset(&init_params, 0, sizeof(init_params)); 1419 #endif 1420 1421 mutex_init(&adev->dm.dc_lock); 1422 mutex_init(&adev->dm.audio_lock); 1423 spin_lock_init(&adev->dm.vblank_lock); 1424 1425 if(amdgpu_dm_irq_init(adev)) { 1426 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 1427 goto error; 1428 } 1429 1430 init_data.asic_id.chip_family = adev->family; 1431 1432 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1433 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1434 init_data.asic_id.chip_id = adev->pdev->device; 1435 1436 init_data.asic_id.vram_width = adev->gmc.vram_width; 1437 /* TODO: initialize init_data.asic_id.vram_type here!!!! */ 1438 init_data.asic_id.atombios_base_address = 1439 adev->mode_info.atom_context->bios; 1440 1441 init_data.driver = adev; 1442 1443 adev->dm.cgs_device = amdgpu_cgs_create_device(adev); 1444 1445 if (!adev->dm.cgs_device) { 1446 DRM_ERROR("amdgpu: failed to create cgs device.\n"); 1447 goto error; 1448 } 1449 1450 init_data.cgs_device = adev->dm.cgs_device; 1451 1452 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 1453 1454 switch (adev->ip_versions[DCE_HWIP][0]) { 1455 case IP_VERSION(2, 1, 0): 1456 switch (adev->dm.dmcub_fw_version) { 1457 case 0: /* development */ 1458 case 0x1: /* linux-firmware.git hash 6d9f399 */ 1459 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ 1460 init_data.flags.disable_dmcu = false; 1461 break; 1462 default: 1463 init_data.flags.disable_dmcu = true; 1464 } 1465 break; 1466 case IP_VERSION(2, 0, 3): 1467 init_data.flags.disable_dmcu = true; 1468 break; 1469 default: 1470 break; 1471 } 1472 1473 switch (adev->asic_type) { 1474 case CHIP_CARRIZO: 1475 case CHIP_STONEY: 1476 init_data.flags.gpu_vm_support = true; 1477 break; 1478 default: 1479 switch (adev->ip_versions[DCE_HWIP][0]) { 1480 case IP_VERSION(1, 0, 0): 1481 case IP_VERSION(1, 0, 1): 1482 /* enable S/G on PCO and RV2 */ 1483 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || 1484 (adev->apu_flags & AMD_APU_IS_PICASSO)) 1485 init_data.flags.gpu_vm_support = true; 1486 break; 1487 case IP_VERSION(2, 1, 0): 1488 case IP_VERSION(3, 0, 1): 1489 case IP_VERSION(3, 1, 2): 1490 case IP_VERSION(3, 1, 3): 1491 case IP_VERSION(3, 1, 5): 1492 case IP_VERSION(3, 1, 6): 1493 init_data.flags.gpu_vm_support = true; 1494 break; 1495 default: 1496 break; 1497 } 1498 break; 1499 } 1500 1501 if (init_data.flags.gpu_vm_support) 1502 adev->mode_info.gpu_vm_support = true; 1503 1504 if (amdgpu_dc_feature_mask & DC_FBC_MASK) 1505 init_data.flags.fbc_support = true; 1506 1507 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) 1508 init_data.flags.multi_mon_pp_mclk_switch 
= true; 1509 1510 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) 1511 init_data.flags.disable_fractional_pwm = true; 1512 1513 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) 1514 init_data.flags.edp_no_power_sequencing = true; 1515 1516 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) 1517 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; 1518 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) 1519 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; 1520 1521 init_data.flags.seamless_boot_edp_requested = false; 1522 1523 if (check_seamless_boot_capability(adev)) { 1524 init_data.flags.seamless_boot_edp_requested = true; 1525 init_data.flags.allow_seamless_boot_optimization = true; 1526 DRM_INFO("Seamless boot condition check passed\n"); 1527 } 1528 1529 init_data.flags.enable_mipi_converter_optimization = true; 1530 1531 INIT_LIST_HEAD(&adev->dm.da_list); 1532 /* Display Core create. */ 1533 adev->dm.dc = dc_create(&init_data); 1534 1535 if (adev->dm.dc) { 1536 DRM_INFO("Display Core initialized with v%s!\n", DC_VER); 1537 } else { 1538 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); 1539 goto error; 1540 } 1541 1542 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { 1543 adev->dm.dc->debug.force_single_disp_pipe_split = false; 1544 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 1545 } 1546 1547 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 1548 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; 1549 if (dm_should_disable_stutter(adev->pdev)) 1550 adev->dm.dc->debug.disable_stutter = true; 1551 1552 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) 1553 adev->dm.dc->debug.disable_stutter = true; 1554 1555 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { 1556 adev->dm.dc->debug.disable_dsc = true; 1557 adev->dm.dc->debug.disable_dsc_edp = true; 1558 } 1559 1560 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) 1561 adev->dm.dc->debug.disable_clock_gate = true; 1562 1563 r = dm_dmub_hw_init(adev); 1564 if (r) { 1565 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1566 goto error; 1567 } 1568 1569 dc_hardware_init(adev->dm.dc); 1570 1571 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 1572 if (!adev->dm.hpd_rx_offload_wq) { 1573 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 1574 goto error; 1575 } 1576 1577 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 1578 struct dc_phy_addr_space_config pa_config; 1579 1580 mmhub_read_system_context(adev, &pa_config); 1581 1582 // Call the DC init_memory func 1583 dc_setup_system_context(adev->dm.dc, &pa_config); 1584 } 1585 1586 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 1587 if (!adev->dm.freesync_module) { 1588 DRM_ERROR( 1589 "amdgpu: failed to initialize freesync_module.\n"); 1590 } else 1591 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 1592 adev->dm.freesync_module); 1593 1594 amdgpu_dm_init_color_mod(); 1595 1596 if (adev->dm.dc->caps.max_links > 0) { 1597 adev->dm.vblank_control_workqueue = 1598 create_singlethread_workqueue("dm_vblank_control_workqueue"); 1599 if (!adev->dm.vblank_control_workqueue) 1600 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 1601 } 1602 1603 #ifdef CONFIG_DRM_AMD_DC_HDCP 1604 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { 1605 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, 
adev->dm.dc); 1606 1607 if (!adev->dm.hdcp_workqueue) 1608 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 1609 else 1610 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 1611 1612 dc_init_callbacks(adev->dm.dc, &init_params); 1613 } 1614 #endif 1615 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1616 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); 1617 #endif 1618 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1619 init_completion(&adev->dm.dmub_aux_transfer_done); 1620 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 1621 if (!adev->dm.dmub_notify) { 1622 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 1623 goto error; 1624 } 1625 1626 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 1627 if (!adev->dm.delayed_hpd_wq) { 1628 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 1629 goto error; 1630 } 1631 1632 amdgpu_dm_outbox_init(adev); 1633 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 1634 dmub_aux_setconfig_callback, false)) { 1635 DRM_ERROR("amdgpu: fail to register dmub aux callback"); 1636 goto error; 1637 } 1638 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { 1639 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1640 goto error; 1641 } 1642 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { 1643 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1644 goto error; 1645 } 1646 } 1647 1648 if (amdgpu_dm_initialize_drm_device(adev)) { 1649 DRM_ERROR( 1650 "amdgpu: failed to initialize sw for display support.\n"); 1651 goto error; 1652 } 1653 1654 /* create fake encoders for MST */ 1655 dm_dp_create_fake_mst_encoders(adev); 1656 1657 /* TODO: Add_display_info? 
*/ 1658 1659 /* TODO use dynamic cursor width */ 1660 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 1661 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 1662 1663 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 1664 DRM_ERROR( 1665 "amdgpu: failed to initialize sw for display support.\n"); 1666 goto error; 1667 } 1668 1669 1670 DRM_DEBUG_DRIVER("KMS initialized.\n"); 1671 1672 return 0; 1673 error: 1674 amdgpu_dm_fini(adev); 1675 1676 return -EINVAL; 1677 } 1678 1679 static int amdgpu_dm_early_fini(void *handle) 1680 { 1681 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1682 1683 amdgpu_dm_audio_fini(adev); 1684 1685 return 0; 1686 } 1687 1688 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1689 { 1690 int i; 1691 1692 if (adev->dm.vblank_control_workqueue) { 1693 destroy_workqueue(adev->dm.vblank_control_workqueue); 1694 adev->dm.vblank_control_workqueue = NULL; 1695 } 1696 1697 for (i = 0; i < adev->dm.display_indexes_num; i++) { 1698 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); 1699 } 1700 1701 amdgpu_dm_destroy_drm_device(&adev->dm); 1702 1703 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1704 if (adev->dm.crc_rd_wrk) { 1705 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); 1706 kfree(adev->dm.crc_rd_wrk); 1707 adev->dm.crc_rd_wrk = NULL; 1708 } 1709 #endif 1710 #ifdef CONFIG_DRM_AMD_DC_HDCP 1711 if (adev->dm.hdcp_workqueue) { 1712 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 1713 adev->dm.hdcp_workqueue = NULL; 1714 } 1715 1716 if (adev->dm.dc) 1717 dc_deinit_callbacks(adev->dm.dc); 1718 #endif 1719 1720 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 1721 1722 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1723 kfree(adev->dm.dmub_notify); 1724 adev->dm.dmub_notify = NULL; 1725 destroy_workqueue(adev->dm.delayed_hpd_wq); 1726 adev->dm.delayed_hpd_wq = NULL; 1727 } 1728 1729 if (adev->dm.dmub_bo) 1730 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 1731 &adev->dm.dmub_bo_gpu_addr, 1732 &adev->dm.dmub_bo_cpu_addr); 1733 1734 if (adev->dm.hpd_rx_offload_wq) { 1735 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 1736 if (adev->dm.hpd_rx_offload_wq[i].wq) { 1737 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 1738 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 1739 } 1740 } 1741 1742 kfree(adev->dm.hpd_rx_offload_wq); 1743 adev->dm.hpd_rx_offload_wq = NULL; 1744 } 1745 1746 /* DC Destroy TODO: Replace destroy DAL */ 1747 if (adev->dm.dc) 1748 dc_destroy(&adev->dm.dc); 1749 /* 1750 * TODO: pageflip, vlank interrupt 1751 * 1752 * amdgpu_dm_irq_fini(adev); 1753 */ 1754 1755 if (adev->dm.cgs_device) { 1756 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 1757 adev->dm.cgs_device = NULL; 1758 } 1759 if (adev->dm.freesync_module) { 1760 mod_freesync_destroy(adev->dm.freesync_module); 1761 adev->dm.freesync_module = NULL; 1762 } 1763 1764 mutex_destroy(&adev->dm.audio_lock); 1765 mutex_destroy(&adev->dm.dc_lock); 1766 1767 return; 1768 } 1769 1770 static int load_dmcu_fw(struct amdgpu_device *adev) 1771 { 1772 const char *fw_name_dmcu = NULL; 1773 int r; 1774 const struct dmcu_firmware_header_v1_0 *hdr; 1775 1776 switch(adev->asic_type) { 1777 #if defined(CONFIG_DRM_AMD_DC_SI) 1778 case CHIP_TAHITI: 1779 case CHIP_PITCAIRN: 1780 case CHIP_VERDE: 1781 case CHIP_OLAND: 1782 #endif 1783 case CHIP_BONAIRE: 1784 case CHIP_HAWAII: 1785 case CHIP_KAVERI: 1786 case CHIP_KABINI: 1787 case CHIP_MULLINS: 1788 case CHIP_TONGA: 1789 case CHIP_FIJI: 1790 case CHIP_CARRIZO: 1791 
case CHIP_STONEY: 1792 case CHIP_POLARIS11: 1793 case CHIP_POLARIS10: 1794 case CHIP_POLARIS12: 1795 case CHIP_VEGAM: 1796 case CHIP_VEGA10: 1797 case CHIP_VEGA12: 1798 case CHIP_VEGA20: 1799 return 0; 1800 case CHIP_NAVI12: 1801 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 1802 break; 1803 case CHIP_RAVEN: 1804 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 1805 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1806 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 1807 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1808 else 1809 return 0; 1810 break; 1811 default: 1812 switch (adev->ip_versions[DCE_HWIP][0]) { 1813 case IP_VERSION(2, 0, 2): 1814 case IP_VERSION(2, 0, 3): 1815 case IP_VERSION(2, 0, 0): 1816 case IP_VERSION(2, 1, 0): 1817 case IP_VERSION(3, 0, 0): 1818 case IP_VERSION(3, 0, 2): 1819 case IP_VERSION(3, 0, 3): 1820 case IP_VERSION(3, 0, 1): 1821 case IP_VERSION(3, 1, 2): 1822 case IP_VERSION(3, 1, 3): 1823 case IP_VERSION(3, 1, 5): 1824 case IP_VERSION(3, 1, 6): 1825 case IP_VERSION(3, 2, 0): 1826 case IP_VERSION(3, 2, 1): 1827 return 0; 1828 default: 1829 break; 1830 } 1831 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 1832 return -EINVAL; 1833 } 1834 1835 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1836 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 1837 return 0; 1838 } 1839 1840 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); 1841 if (r == -ENOENT) { 1842 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 1843 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 1844 adev->dm.fw_dmcu = NULL; 1845 return 0; 1846 } 1847 if (r) { 1848 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", 1849 fw_name_dmcu); 1850 return r; 1851 } 1852 1853 r = amdgpu_ucode_validate(adev->dm.fw_dmcu); 1854 if (r) { 1855 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 1856 fw_name_dmcu); 1857 release_firmware(adev->dm.fw_dmcu); 1858 adev->dm.fw_dmcu = NULL; 1859 return r; 1860 } 1861 1862 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 1863 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 1864 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 1865 adev->firmware.fw_size += 1866 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 1867 1868 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 1869 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 1870 adev->firmware.fw_size += 1871 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 1872 1873 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 1874 1875 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 1876 1877 return 0; 1878 } 1879 1880 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 1881 { 1882 struct amdgpu_device *adev = ctx; 1883 1884 return dm_read_reg(adev->dm.dc->ctx, address); 1885 } 1886 1887 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 1888 uint32_t value) 1889 { 1890 struct amdgpu_device *adev = ctx; 1891 1892 return dm_write_reg(adev->dm.dc->ctx, address, value); 1893 } 1894 1895 static int dm_dmub_sw_init(struct amdgpu_device *adev) 1896 { 1897 struct dmub_srv_create_params create_params; 1898 struct dmub_srv_region_params region_params; 1899 struct dmub_srv_region_info region_info; 1900 struct dmub_srv_fb_params fb_params; 1901 struct dmub_srv_fb_info *fb_info; 1902 struct dmub_srv *dmub_srv; 1903 const 
struct dmcub_firmware_header_v1_0 *hdr; 1904 const char *fw_name_dmub; 1905 enum dmub_asic dmub_asic; 1906 enum dmub_status status; 1907 int r; 1908 1909 switch (adev->ip_versions[DCE_HWIP][0]) { 1910 case IP_VERSION(2, 1, 0): 1911 dmub_asic = DMUB_ASIC_DCN21; 1912 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 1913 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 1914 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 1915 break; 1916 case IP_VERSION(3, 0, 0): 1917 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { 1918 dmub_asic = DMUB_ASIC_DCN30; 1919 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 1920 } else { 1921 dmub_asic = DMUB_ASIC_DCN30; 1922 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 1923 } 1924 break; 1925 case IP_VERSION(3, 0, 1): 1926 dmub_asic = DMUB_ASIC_DCN301; 1927 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 1928 break; 1929 case IP_VERSION(3, 0, 2): 1930 dmub_asic = DMUB_ASIC_DCN302; 1931 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 1932 break; 1933 case IP_VERSION(3, 0, 3): 1934 dmub_asic = DMUB_ASIC_DCN303; 1935 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 1936 break; 1937 case IP_VERSION(3, 1, 2): 1938 case IP_VERSION(3, 1, 3): 1939 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; 1940 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 1941 break; 1942 case IP_VERSION(3, 1, 5): 1943 dmub_asic = DMUB_ASIC_DCN315; 1944 fw_name_dmub = FIRMWARE_DCN_315_DMUB; 1945 break; 1946 case IP_VERSION(3, 1, 6): 1947 dmub_asic = DMUB_ASIC_DCN316; 1948 fw_name_dmub = FIRMWARE_DCN316_DMUB; 1949 break; 1950 case IP_VERSION(3, 2, 0): 1951 dmub_asic = DMUB_ASIC_DCN32; 1952 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; 1953 break; 1954 case IP_VERSION(3, 2, 1): 1955 dmub_asic = DMUB_ASIC_DCN321; 1956 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; 1957 break; 1958 default: 1959 /* ASIC doesn't support DMUB. */ 1960 return 0; 1961 } 1962 1963 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); 1964 if (r) { 1965 DRM_ERROR("DMUB firmware loading failed: %d\n", r); 1966 return 0; 1967 } 1968 1969 r = amdgpu_ucode_validate(adev->dm.dmub_fw); 1970 if (r) { 1971 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); 1972 return 0; 1973 } 1974 1975 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 1976 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 1977 1978 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1979 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 1980 AMDGPU_UCODE_ID_DMCUB; 1981 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 1982 adev->dm.dmub_fw; 1983 adev->firmware.fw_size += 1984 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 1985 1986 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 1987 adev->dm.dmcub_fw_version); 1988 } 1989 1990 1991 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 1992 dmub_srv = adev->dm.dmub_srv; 1993 1994 if (!dmub_srv) { 1995 DRM_ERROR("Failed to allocate DMUB service!\n"); 1996 return -ENOMEM; 1997 } 1998 1999 memset(&create_params, 0, sizeof(create_params)); 2000 create_params.user_ctx = adev; 2001 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 2002 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 2003 create_params.asic = dmub_asic; 2004 2005 /* Create the DMUB service. 
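 * Note: dmub_srv_create() only initializes the service object for the given
 * ASIC and hooks up the register access callbacks from create_params; the
 * firmware regions are sized and the framebuffer backing them is allocated
 * by the code that follows.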
*/ 2006 status = dmub_srv_create(dmub_srv, &create_params); 2007 if (status != DMUB_STATUS_OK) { 2008 DRM_ERROR("Error creating DMUB service: %d\n", status); 2009 return -EINVAL; 2010 } 2011 2012 /* Calculate the size of all the regions for the DMUB service. */ 2013 memset(&region_params, 0, sizeof(region_params)); 2014 2015 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 2016 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 2017 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 2018 region_params.vbios_size = adev->bios_size; 2019 region_params.fw_bss_data = region_params.bss_data_size ? 2020 adev->dm.dmub_fw->data + 2021 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2022 le32_to_cpu(hdr->inst_const_bytes) : NULL; 2023 region_params.fw_inst_const = 2024 adev->dm.dmub_fw->data + 2025 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 2026 PSP_HEADER_BYTES; 2027 2028 status = dmub_srv_calc_region_info(dmub_srv, &region_params, 2029 &region_info); 2030 2031 if (status != DMUB_STATUS_OK) { 2032 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 2033 return -EINVAL; 2034 } 2035 2036 /* 2037 * Allocate a framebuffer based on the total size of all the regions. 2038 * TODO: Move this into GART. 2039 */ 2040 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 2041 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, 2042 &adev->dm.dmub_bo_gpu_addr, 2043 &adev->dm.dmub_bo_cpu_addr); 2044 if (r) 2045 return r; 2046 2047 /* Rebase the regions on the framebuffer address. */ 2048 memset(&fb_params, 0, sizeof(fb_params)); 2049 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; 2050 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; 2051 fb_params.region_info = &region_info; 2052 2053 adev->dm.dmub_fb_info = 2054 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 2055 fb_info = adev->dm.dmub_fb_info; 2056 2057 if (!fb_info) { 2058 DRM_ERROR( 2059 "Failed to allocate framebuffer info for DMUB service!\n"); 2060 return -ENOMEM; 2061 } 2062 2063 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); 2064 if (status != DMUB_STATUS_OK) { 2065 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 2066 return -EINVAL; 2067 } 2068 2069 return 0; 2070 } 2071 2072 static int dm_sw_init(void *handle) 2073 { 2074 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2075 int r; 2076 2077 r = dm_dmub_sw_init(adev); 2078 if (r) 2079 return r; 2080 2081 return load_dmcu_fw(adev); 2082 } 2083 2084 static int dm_sw_fini(void *handle) 2085 { 2086 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2087 2088 kfree(adev->dm.dmub_fb_info); 2089 adev->dm.dmub_fb_info = NULL; 2090 2091 if (adev->dm.dmub_srv) { 2092 dmub_srv_destroy(adev->dm.dmub_srv); 2093 adev->dm.dmub_srv = NULL; 2094 } 2095 2096 release_firmware(adev->dm.dmub_fw); 2097 adev->dm.dmub_fw = NULL; 2098 2099 release_firmware(adev->dm.fw_dmcu); 2100 adev->dm.fw_dmcu = NULL; 2101 2102 return 0; 2103 } 2104 2105 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 2106 { 2107 struct amdgpu_dm_connector *aconnector; 2108 struct drm_connector *connector; 2109 struct drm_connector_list_iter iter; 2110 int ret = 0; 2111 2112 drm_connector_list_iter_begin(dev, &iter); 2113 drm_for_each_connector_iter(connector, &iter) { 2114 aconnector = to_amdgpu_dm_connector(connector); 2115 if (aconnector->dc_link->type == dc_connection_mst_branch && 2116 aconnector->mst_mgr.aux) { 2117 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", 2118 aconnector, 2119 aconnector->base.base.id); 2120
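			/*
			 * Start topology management on this MST root connector;
			 * if it fails, the link is downgraded to a plain
			 * single-stream connection below.
			 */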
2121 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 2122 if (ret < 0) { 2123 DRM_ERROR("DM_MST: Failed to start MST\n"); 2124 aconnector->dc_link->type = 2125 dc_connection_single; 2126 break; 2127 } 2128 } 2129 } 2130 drm_connector_list_iter_end(&iter); 2131 2132 return ret; 2133 } 2134 2135 static int dm_late_init(void *handle) 2136 { 2137 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2138 2139 struct dmcu_iram_parameters params; 2140 unsigned int linear_lut[16]; 2141 int i; 2142 struct dmcu *dmcu = NULL; 2143 2144 dmcu = adev->dm.dc->res_pool->dmcu; 2145 2146 for (i = 0; i < 16; i++) 2147 linear_lut[i] = 0xFFFF * i / 15; 2148 2149 params.set = 0; 2150 params.backlight_ramping_override = false; 2151 params.backlight_ramping_start = 0xCCCC; 2152 params.backlight_ramping_reduction = 0xCCCCCCCC; 2153 params.backlight_lut_array_size = 16; 2154 params.backlight_lut_array = linear_lut; 2155 2156 /* Min backlight level after ABM reduction, Don't allow below 1% 2157 * 0xFFFF x 0.01 = 0x28F 2158 */ 2159 params.min_abm_backlight = 0x28F; 2160 /* In the case where abm is implemented on dmcub, 2161 * dmcu object will be null. 2162 * ABM 2.4 and up are implemented on dmcub. 2163 */ 2164 if (dmcu) { 2165 if (!dmcu_load_iram(dmcu, params)) 2166 return -EINVAL; 2167 } else if (adev->dm.dc->ctx->dmub_srv) { 2168 struct dc_link *edp_links[MAX_NUM_EDP]; 2169 int edp_num; 2170 2171 get_edp_links(adev->dm.dc, edp_links, &edp_num); 2172 for (i = 0; i < edp_num; i++) { 2173 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 2174 return -EINVAL; 2175 } 2176 } 2177 2178 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 2179 } 2180 2181 static void s3_handle_mst(struct drm_device *dev, bool suspend) 2182 { 2183 struct amdgpu_dm_connector *aconnector; 2184 struct drm_connector *connector; 2185 struct drm_connector_list_iter iter; 2186 struct drm_dp_mst_topology_mgr *mgr; 2187 int ret; 2188 bool need_hotplug = false; 2189 2190 drm_connector_list_iter_begin(dev, &iter); 2191 drm_for_each_connector_iter(connector, &iter) { 2192 aconnector = to_amdgpu_dm_connector(connector); 2193 if (aconnector->dc_link->type != dc_connection_mst_branch || 2194 aconnector->mst_port) 2195 continue; 2196 2197 mgr = &aconnector->mst_mgr; 2198 2199 if (suspend) { 2200 drm_dp_mst_topology_mgr_suspend(mgr); 2201 } else { 2202 ret = drm_dp_mst_topology_mgr_resume(mgr, true); 2203 if (ret < 0) { 2204 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, 2205 aconnector->dc_link); 2206 need_hotplug = true; 2207 } 2208 } 2209 } 2210 drm_connector_list_iter_end(&iter); 2211 2212 if (need_hotplug) 2213 drm_kms_helper_hotplug_event(dev); 2214 } 2215 2216 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2217 { 2218 int ret = 0; 2219 2220 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends 2221 * on window driver dc implementation. 2222 * For Navi1x, clock settings of dcn watermarks are fixed. the settings 2223 * should be passed to smu during boot up and resume from s3. 2224 * boot up: dc calculate dcn watermark clock settings within dc_create, 2225 * dcn20_resource_construct 2226 * then call pplib functions below to pass the settings to smu: 2227 * smu_set_watermarks_for_clock_ranges 2228 * smu_set_watermarks_table 2229 * navi10_set_watermarks_table 2230 * smu_write_watermarks_table 2231 * 2232 * For Renoir, clock settings of dcn watermark are also fixed values. 
2233 * dc has implemented different flow for window driver: 2234 * dc_hardware_init / dc_set_power_state 2235 * dcn10_init_hw 2236 * notify_wm_ranges 2237 * set_wm_ranges 2238 * -- Linux 2239 * smu_set_watermarks_for_clock_ranges 2240 * renoir_set_watermarks_table 2241 * smu_write_watermarks_table 2242 * 2243 * For Linux, 2244 * dc_hardware_init -> amdgpu_dm_init 2245 * dc_set_power_state --> dm_resume 2246 * 2247 * therefore, this function apply to navi10/12/14 but not Renoir 2248 * * 2249 */ 2250 switch (adev->ip_versions[DCE_HWIP][0]) { 2251 case IP_VERSION(2, 0, 2): 2252 case IP_VERSION(2, 0, 0): 2253 break; 2254 default: 2255 return 0; 2256 } 2257 2258 ret = amdgpu_dpm_write_watermarks_table(adev); 2259 if (ret) { 2260 DRM_ERROR("Failed to update WMTABLE!\n"); 2261 return ret; 2262 } 2263 2264 return 0; 2265 } 2266 2267 /** 2268 * dm_hw_init() - Initialize DC device 2269 * @handle: The base driver device containing the amdgpu_dm device. 2270 * 2271 * Initialize the &struct amdgpu_display_manager device. This involves calling 2272 * the initializers of each DM component, then populating the struct with them. 2273 * 2274 * Although the function implies hardware initialization, both hardware and 2275 * software are initialized here. Splitting them out to their relevant init 2276 * hooks is a future TODO item. 2277 * 2278 * Some notable things that are initialized here: 2279 * 2280 * - Display Core, both software and hardware 2281 * - DC modules that we need (freesync and color management) 2282 * - DRM software states 2283 * - Interrupt sources and handlers 2284 * - Vblank support 2285 * - Debug FS entries, if enabled 2286 */ 2287 static int dm_hw_init(void *handle) 2288 { 2289 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2290 /* Create DAL display manager */ 2291 amdgpu_dm_init(adev); 2292 amdgpu_dm_hpd_init(adev); 2293 2294 return 0; 2295 } 2296 2297 /** 2298 * dm_hw_fini() - Teardown DC device 2299 * @handle: The base driver device containing the amdgpu_dm device. 2300 * 2301 * Teardown components within &struct amdgpu_display_manager that require 2302 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2303 * were loaded. Also flush IRQ workqueues and disable them. 2304 */ 2305 static int dm_hw_fini(void *handle) 2306 { 2307 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2308 2309 amdgpu_dm_hpd_fini(adev); 2310 2311 amdgpu_dm_irq_fini(adev); 2312 amdgpu_dm_fini(adev); 2313 return 0; 2314 } 2315 2316 2317 static int dm_enable_vblank(struct drm_crtc *crtc); 2318 static void dm_disable_vblank(struct drm_crtc *crtc); 2319 2320 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2321 struct dc_state *state, bool enable) 2322 { 2323 enum dc_irq_source irq_source; 2324 struct amdgpu_crtc *acrtc; 2325 int rc = -EBUSY; 2326 int i = 0; 2327 2328 for (i = 0; i < state->stream_count; i++) { 2329 acrtc = get_crtc_by_otg_inst( 2330 adev, state->stream_status[i].primary_otg_inst); 2331 2332 if (acrtc && state->stream_status[i].plane_count != 0) { 2333 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2334 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2335 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", 2336 acrtc->crtc_id, enable ? "en" : "dis", rc); 2337 if (rc) 2338 DRM_WARN("Failed to %s pflip interrupts\n", 2339 enable ? 
"enable" : "disable"); 2340 2341 if (enable) { 2342 rc = dm_enable_vblank(&acrtc->base); 2343 if (rc) 2344 DRM_WARN("Failed to enable vblank interrupts\n"); 2345 } else { 2346 dm_disable_vblank(&acrtc->base); 2347 } 2348 2349 } 2350 } 2351 2352 } 2353 2354 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2355 { 2356 struct dc_state *context = NULL; 2357 enum dc_status res = DC_ERROR_UNEXPECTED; 2358 int i; 2359 struct dc_stream_state *del_streams[MAX_PIPES]; 2360 int del_streams_count = 0; 2361 2362 memset(del_streams, 0, sizeof(del_streams)); 2363 2364 context = dc_create_state(dc); 2365 if (context == NULL) 2366 goto context_alloc_fail; 2367 2368 dc_resource_state_copy_construct_current(dc, context); 2369 2370 /* First remove from context all streams */ 2371 for (i = 0; i < context->stream_count; i++) { 2372 struct dc_stream_state *stream = context->streams[i]; 2373 2374 del_streams[del_streams_count++] = stream; 2375 } 2376 2377 /* Remove all planes for removed streams and then remove the streams */ 2378 for (i = 0; i < del_streams_count; i++) { 2379 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2380 res = DC_FAIL_DETACH_SURFACES; 2381 goto fail; 2382 } 2383 2384 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2385 if (res != DC_OK) 2386 goto fail; 2387 } 2388 2389 res = dc_commit_state(dc, context); 2390 2391 fail: 2392 dc_release_state(context); 2393 2394 context_alloc_fail: 2395 return res; 2396 } 2397 2398 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2399 { 2400 int i; 2401 2402 if (dm->hpd_rx_offload_wq) { 2403 for (i = 0; i < dm->dc->caps.max_links; i++) 2404 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2405 } 2406 } 2407 2408 static int dm_suspend(void *handle) 2409 { 2410 struct amdgpu_device *adev = handle; 2411 struct amdgpu_display_manager *dm = &adev->dm; 2412 int ret = 0; 2413 2414 if (amdgpu_in_reset(adev)) { 2415 mutex_lock(&dm->dc_lock); 2416 2417 dc_allow_idle_optimizations(adev->dm.dc, false); 2418 2419 dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2420 2421 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2422 2423 amdgpu_dm_commit_zero_streams(dm->dc); 2424 2425 amdgpu_dm_irq_suspend(adev); 2426 2427 hpd_rx_irq_work_suspend(dm); 2428 2429 return ret; 2430 } 2431 2432 WARN_ON(adev->dm.cached_state); 2433 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 2434 2435 s3_handle_mst(adev_to_drm(adev), true); 2436 2437 amdgpu_dm_irq_suspend(adev); 2438 2439 hpd_rx_irq_work_suspend(dm); 2440 2441 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 2442 2443 return 0; 2444 } 2445 2446 struct amdgpu_dm_connector * 2447 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2448 struct drm_crtc *crtc) 2449 { 2450 uint32_t i; 2451 struct drm_connector_state *new_con_state; 2452 struct drm_connector *connector; 2453 struct drm_crtc *crtc_from_state; 2454 2455 for_each_new_connector_in_state(state, connector, new_con_state, i) { 2456 crtc_from_state = new_con_state->crtc; 2457 2458 if (crtc_from_state == crtc) 2459 return to_amdgpu_dm_connector(connector); 2460 } 2461 2462 return NULL; 2463 } 2464 2465 static void emulated_link_detect(struct dc_link *link) 2466 { 2467 struct dc_sink_init_data sink_init_data = { 0 }; 2468 struct display_sink_capability sink_caps = { 0 }; 2469 enum dc_edid_status edid_status; 2470 struct dc_context *dc_ctx = link->ctx; 2471 struct dc_sink *sink = NULL; 2472 struct dc_sink *prev_sink = NULL; 2473 
2474 link->type = dc_connection_none; 2475 prev_sink = link->local_sink; 2476 2477 if (prev_sink) 2478 dc_sink_release(prev_sink); 2479 2480 switch (link->connector_signal) { 2481 case SIGNAL_TYPE_HDMI_TYPE_A: { 2482 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2483 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 2484 break; 2485 } 2486 2487 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 2488 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2489 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 2490 break; 2491 } 2492 2493 case SIGNAL_TYPE_DVI_DUAL_LINK: { 2494 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2495 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 2496 break; 2497 } 2498 2499 case SIGNAL_TYPE_LVDS: { 2500 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2501 sink_caps.signal = SIGNAL_TYPE_LVDS; 2502 break; 2503 } 2504 2505 case SIGNAL_TYPE_EDP: { 2506 sink_caps.transaction_type = 2507 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2508 sink_caps.signal = SIGNAL_TYPE_EDP; 2509 break; 2510 } 2511 2512 case SIGNAL_TYPE_DISPLAY_PORT: { 2513 sink_caps.transaction_type = 2514 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2515 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 2516 break; 2517 } 2518 2519 default: 2520 DC_ERROR("Invalid connector type! signal:%d\n", 2521 link->connector_signal); 2522 return; 2523 } 2524 2525 sink_init_data.link = link; 2526 sink_init_data.sink_signal = sink_caps.signal; 2527 2528 sink = dc_sink_create(&sink_init_data); 2529 if (!sink) { 2530 DC_ERROR("Failed to create sink!\n"); 2531 return; 2532 } 2533 2534 /* dc_sink_create returns a new reference */ 2535 link->local_sink = sink; 2536 2537 edid_status = dm_helpers_read_local_edid( 2538 link->ctx, 2539 link, 2540 sink); 2541 2542 if (edid_status != EDID_OK) 2543 DC_ERROR("Failed to read EDID"); 2544 2545 } 2546 2547 static void dm_gpureset_commit_state(struct dc_state *dc_state, 2548 struct amdgpu_display_manager *dm) 2549 { 2550 struct { 2551 struct dc_surface_update surface_updates[MAX_SURFACES]; 2552 struct dc_plane_info plane_infos[MAX_SURFACES]; 2553 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 2554 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 2555 struct dc_stream_update stream_update; 2556 } * bundle; 2557 int k, m; 2558 2559 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 2560 2561 if (!bundle) { 2562 dm_error("Failed to allocate update bundle\n"); 2563 goto cleanup; 2564 } 2565 2566 for (k = 0; k < dc_state->stream_count; k++) { 2567 bundle->stream_update.stream = dc_state->streams[k]; 2568 2569 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 2570 bundle->surface_updates[m].surface = 2571 dc_state->stream_status->plane_states[m]; 2572 bundle->surface_updates[m].surface->force_full_update = 2573 true; 2574 } 2575 dc_commit_updates_for_stream( 2576 dm->dc, bundle->surface_updates, 2577 dc_state->stream_status->plane_count, 2578 dc_state->streams[k], &bundle->stream_update, dc_state); 2579 } 2580 2581 cleanup: 2582 kfree(bundle); 2583 2584 return; 2585 } 2586 2587 static int dm_resume(void *handle) 2588 { 2589 struct amdgpu_device *adev = handle; 2590 struct drm_device *ddev = adev_to_drm(adev); 2591 struct amdgpu_display_manager *dm = &adev->dm; 2592 struct amdgpu_dm_connector *aconnector; 2593 struct drm_connector *connector; 2594 struct drm_connector_list_iter iter; 2595 struct drm_crtc *crtc; 2596 struct drm_crtc_state *new_crtc_state; 2597 struct dm_crtc_state *dm_new_crtc_state; 2598 struct drm_plane *plane; 2599 struct drm_plane_state *new_plane_state; 2600 struct dm_plane_state 
*dm_new_plane_state; 2601 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 2602 enum dc_connection_type new_connection_type = dc_connection_none; 2603 struct dc_state *dc_state; 2604 int i, r, j; 2605 2606 if (amdgpu_in_reset(adev)) { 2607 dc_state = dm->cached_dc_state; 2608 2609 /* 2610 * The dc->current_state is backed up into dm->cached_dc_state 2611 * before we commit 0 streams. 2612 * 2613 * DC will clear link encoder assignments on the real state 2614 * but the changes won't propagate over to the copy we made 2615 * before the 0 streams commit. 2616 * 2617 * DC expects that link encoder assignments are *not* valid 2618 * when committing a state, so as a workaround we can copy 2619 * off of the current state. 2620 * 2621 * We lose the previous assignments, but we had already 2622 * commit 0 streams anyway. 2623 */ 2624 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); 2625 2626 if (dc_enable_dmub_notifications(adev->dm.dc)) 2627 amdgpu_dm_outbox_init(adev); 2628 2629 r = dm_dmub_hw_init(adev); 2630 if (r) 2631 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2632 2633 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2634 dc_resume(dm->dc); 2635 2636 amdgpu_dm_irq_resume_early(adev); 2637 2638 for (i = 0; i < dc_state->stream_count; i++) { 2639 dc_state->streams[i]->mode_changed = true; 2640 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { 2641 dc_state->stream_status[i].plane_states[j]->update_flags.raw 2642 = 0xffffffff; 2643 } 2644 } 2645 2646 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 2647 2648 dm_gpureset_commit_state(dm->cached_dc_state, dm); 2649 2650 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2651 2652 dc_release_state(dm->cached_dc_state); 2653 dm->cached_dc_state = NULL; 2654 2655 amdgpu_dm_irq_resume_late(adev); 2656 2657 mutex_unlock(&dm->dc_lock); 2658 2659 return 0; 2660 } 2661 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 2662 dc_release_state(dm_state->context); 2663 dm_state->context = dc_create_state(dm->dc); 2664 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2665 dc_resource_state_construct(dm->dc, dm_state->context); 2666 2667 /* Re-enable outbox interrupts for DPIA. */ 2668 if (dc_enable_dmub_notifications(adev->dm.dc)) 2669 amdgpu_dm_outbox_init(adev); 2670 2671 /* Before powering on DC we need to re-initialize DMUB. 
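 * (its hardware state and the region layout programmed at init time are not
 * preserved across suspend, so the hw init sequence is rerun here; see
 * dm_dmub_hw_resume and dm_dmub_hw_init)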
*/ 2672 dm_dmub_hw_resume(adev); 2673 2674 /* power on hardware */ 2675 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2676 2677 /* program HPD filter */ 2678 dc_resume(dm->dc); 2679 2680 /* 2681 * early enable HPD Rx IRQ, should be done before set mode as short 2682 * pulse interrupts are used for MST 2683 */ 2684 amdgpu_dm_irq_resume_early(adev); 2685 2686 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 2687 s3_handle_mst(ddev, false); 2688 2689 /* Do detection*/ 2690 drm_connector_list_iter_begin(ddev, &iter); 2691 drm_for_each_connector_iter(connector, &iter) { 2692 aconnector = to_amdgpu_dm_connector(connector); 2693 2694 /* 2695 * this is the case when traversing through already created 2696 * MST connectors, should be skipped 2697 */ 2698 if (aconnector->dc_link && 2699 aconnector->dc_link->type == dc_connection_mst_branch) 2700 continue; 2701 2702 mutex_lock(&aconnector->hpd_lock); 2703 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 2704 DRM_ERROR("KMS: Failed to detect connector\n"); 2705 2706 if (aconnector->base.force && new_connection_type == dc_connection_none) 2707 emulated_link_detect(aconnector->dc_link); 2708 else 2709 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 2710 2711 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 2712 aconnector->fake_enable = false; 2713 2714 if (aconnector->dc_sink) 2715 dc_sink_release(aconnector->dc_sink); 2716 aconnector->dc_sink = NULL; 2717 amdgpu_dm_update_connector_after_detect(aconnector); 2718 mutex_unlock(&aconnector->hpd_lock); 2719 } 2720 drm_connector_list_iter_end(&iter); 2721 2722 /* Force mode set in atomic commit */ 2723 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 2724 new_crtc_state->active_changed = true; 2725 2726 /* 2727 * atomic_check is expected to create the dc states. We need to release 2728 * them here, since they were duplicated as part of the suspend 2729 * procedure. 2730 */ 2731 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 2732 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 2733 if (dm_new_crtc_state->stream) { 2734 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 2735 dc_stream_release(dm_new_crtc_state->stream); 2736 dm_new_crtc_state->stream = NULL; 2737 } 2738 } 2739 2740 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 2741 dm_new_plane_state = to_dm_plane_state(new_plane_state); 2742 if (dm_new_plane_state->dc_state) { 2743 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 2744 dc_plane_state_release(dm_new_plane_state->dc_state); 2745 dm_new_plane_state->dc_state = NULL; 2746 } 2747 } 2748 2749 drm_atomic_helper_resume(ddev, dm->cached_state); 2750 2751 dm->cached_state = NULL; 2752 2753 amdgpu_dm_irq_resume_late(adev); 2754 2755 amdgpu_dm_smu_write_watermarks_table(adev); 2756 2757 return 0; 2758 } 2759 2760 /** 2761 * DOC: DM Lifecycle 2762 * 2763 * DM (and consequently DC) is registered in the amdgpu base driver as a IP 2764 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 2765 * the base driver's device list to be initialized and torn down accordingly. 2766 * 2767 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 
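 *
 * As a rough sketch of how that registration looks from the base driver side
 * (the exact call site lives in the IP discovery/SoC setup code and may
 * differ), the block is added with something like:
 *
 *   if (amdgpu_device_has_dc_support(adev))
 *           amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * after which the hooks below (hw_init/hw_fini, suspend/resume, etc.) are
 * driven by the base driver at the matching points in the device lifecycle.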
2768 */ 2769 2770 static const struct amd_ip_funcs amdgpu_dm_funcs = { 2771 .name = "dm", 2772 .early_init = dm_early_init, 2773 .late_init = dm_late_init, 2774 .sw_init = dm_sw_init, 2775 .sw_fini = dm_sw_fini, 2776 .early_fini = amdgpu_dm_early_fini, 2777 .hw_init = dm_hw_init, 2778 .hw_fini = dm_hw_fini, 2779 .suspend = dm_suspend, 2780 .resume = dm_resume, 2781 .is_idle = dm_is_idle, 2782 .wait_for_idle = dm_wait_for_idle, 2783 .check_soft_reset = dm_check_soft_reset, 2784 .soft_reset = dm_soft_reset, 2785 .set_clockgating_state = dm_set_clockgating_state, 2786 .set_powergating_state = dm_set_powergating_state, 2787 }; 2788 2789 const struct amdgpu_ip_block_version dm_ip_block = 2790 { 2791 .type = AMD_IP_BLOCK_TYPE_DCE, 2792 .major = 1, 2793 .minor = 0, 2794 .rev = 0, 2795 .funcs = &amdgpu_dm_funcs, 2796 }; 2797 2798 2799 /** 2800 * DOC: atomic 2801 * 2802 * *WIP* 2803 */ 2804 2805 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 2806 .fb_create = amdgpu_display_user_framebuffer_create, 2807 .get_format_info = amd_get_format_info, 2808 .output_poll_changed = drm_fb_helper_output_poll_changed, 2809 .atomic_check = amdgpu_dm_atomic_check, 2810 .atomic_commit = drm_atomic_helper_commit, 2811 }; 2812 2813 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 2814 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail 2815 }; 2816 2817 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 2818 { 2819 u32 max_avg, min_cll, max, min, q, r; 2820 struct amdgpu_dm_backlight_caps *caps; 2821 struct amdgpu_display_manager *dm; 2822 struct drm_connector *conn_base; 2823 struct amdgpu_device *adev; 2824 struct dc_link *link = NULL; 2825 static const u8 pre_computed_values[] = { 2826 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 2827 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; 2828 int i; 2829 2830 if (!aconnector || !aconnector->dc_link) 2831 return; 2832 2833 link = aconnector->dc_link; 2834 if (link->connector_signal != SIGNAL_TYPE_EDP) 2835 return; 2836 2837 conn_base = &aconnector->base; 2838 adev = drm_to_adev(conn_base->dev); 2839 dm = &adev->dm; 2840 for (i = 0; i < dm->num_of_edps; i++) { 2841 if (link == dm->backlight_link[i]) 2842 break; 2843 } 2844 if (i >= dm->num_of_edps) 2845 return; 2846 caps = &dm->backlight_caps[i]; 2847 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 2848 caps->aux_support = false; 2849 max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; 2850 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; 2851 2852 if (caps->ext_caps->bits.oled == 1 /*|| 2853 caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 2854 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) 2855 caps->aux_support = true; 2856 2857 if (amdgpu_backlight == 0) 2858 caps->aux_support = false; 2859 else if (amdgpu_backlight == 1) 2860 caps->aux_support = true; 2861 2862 /* From the specification (CTA-861-G), for calculating the maximum 2863 * luminance we need to use: 2864 * Luminance = 50*2**(CV/32) 2865 * Where CV is a one-byte value. 2866 * For calculating this expression we may need float point precision; 2867 * to avoid this complexity level, we take advantage that CV is divided 2868 * by a constant. From the Euclids division algorithm, we know that CV 2869 * can be written as: CV = 32*q + r. Next, we replace CV in the 2870 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just 2871 * need to pre-compute the value of r/32. 
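 * As a purely illustrative worked example: max_avg = 70 gives q = 2 and
 * r = 6, so max = 4 * pre_computed_values[6] = 4 * 57 = 228, where
 * 4 = 2**q and 57 = round(50 * 2**(6/32)).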
For pre-computing the values 2872 * We just used the following Ruby line: 2873 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round} 2874 * The results of the above expressions can be verified at 2875 * pre_computed_values. 2876 */ 2877 q = max_avg >> 5; 2878 r = max_avg % 32; 2879 max = (1 << q) * pre_computed_values[r]; 2880 2881 // min luminance: maxLum * (CV/255)^2 / 100 2882 q = DIV_ROUND_CLOSEST(min_cll, 255); 2883 min = max * DIV_ROUND_CLOSEST((q * q), 100); 2884 2885 caps->aux_max_input_signal = max; 2886 caps->aux_min_input_signal = min; 2887 } 2888 2889 void amdgpu_dm_update_connector_after_detect( 2890 struct amdgpu_dm_connector *aconnector) 2891 { 2892 struct drm_connector *connector = &aconnector->base; 2893 struct drm_device *dev = connector->dev; 2894 struct dc_sink *sink; 2895 2896 /* MST handled by drm_mst framework */ 2897 if (aconnector->mst_mgr.mst_state == true) 2898 return; 2899 2900 sink = aconnector->dc_link->local_sink; 2901 if (sink) 2902 dc_sink_retain(sink); 2903 2904 /* 2905 * Edid mgmt connector gets first update only in mode_valid hook and then 2906 * the connector sink is set to either fake or physical sink depends on link status. 2907 * Skip if already done during boot. 2908 */ 2909 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 2910 && aconnector->dc_em_sink) { 2911 2912 /* 2913 * For S3 resume with headless use eml_sink to fake stream 2914 * because on resume connector->sink is set to NULL 2915 */ 2916 mutex_lock(&dev->mode_config.mutex); 2917 2918 if (sink) { 2919 if (aconnector->dc_sink) { 2920 amdgpu_dm_update_freesync_caps(connector, NULL); 2921 /* 2922 * retain and release below are used to 2923 * bump up refcount for sink because the link doesn't point 2924 * to it anymore after disconnect, so on next crtc to connector 2925 * reshuffle by UMD we will get into unwanted dc_sink release 2926 */ 2927 dc_sink_release(aconnector->dc_sink); 2928 } 2929 aconnector->dc_sink = sink; 2930 dc_sink_retain(aconnector->dc_sink); 2931 amdgpu_dm_update_freesync_caps(connector, 2932 aconnector->edid); 2933 } else { 2934 amdgpu_dm_update_freesync_caps(connector, NULL); 2935 if (!aconnector->dc_sink) { 2936 aconnector->dc_sink = aconnector->dc_em_sink; 2937 dc_sink_retain(aconnector->dc_sink); 2938 } 2939 } 2940 2941 mutex_unlock(&dev->mode_config.mutex); 2942 2943 if (sink) 2944 dc_sink_release(sink); 2945 return; 2946 } 2947 2948 /* 2949 * TODO: temporary guard to look for proper fix 2950 * if this sink is MST sink, we should not do anything 2951 */ 2952 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 2953 dc_sink_release(sink); 2954 return; 2955 } 2956 2957 if (aconnector->dc_sink == sink) { 2958 /* 2959 * We got a DP short pulse (Link Loss, DP CTS, etc...). 2960 * Do nothing!! 2961 */ 2962 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 2963 aconnector->connector_id); 2964 if (sink) 2965 dc_sink_release(sink); 2966 return; 2967 } 2968 2969 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 2970 aconnector->connector_id, aconnector->dc_sink, sink); 2971 2972 mutex_lock(&dev->mode_config.mutex); 2973 2974 /* 2975 * 1. Update status of the drm connector 2976 * 2. Send an event and let userspace tell us what to do 2977 */ 2978 if (sink) { 2979 /* 2980 * TODO: check if we still need the S3 mode update workaround. 2981 * If yes, put it here. 
2982 */ 2983 if (aconnector->dc_sink) { 2984 amdgpu_dm_update_freesync_caps(connector, NULL); 2985 dc_sink_release(aconnector->dc_sink); 2986 } 2987 2988 aconnector->dc_sink = sink; 2989 dc_sink_retain(aconnector->dc_sink); 2990 if (sink->dc_edid.length == 0) { 2991 aconnector->edid = NULL; 2992 if (aconnector->dc_link->aux_mode) { 2993 drm_dp_cec_unset_edid( 2994 &aconnector->dm_dp_aux.aux); 2995 } 2996 } else { 2997 aconnector->edid = 2998 (struct edid *)sink->dc_edid.raw_edid; 2999 3000 if (aconnector->dc_link->aux_mode) 3001 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, 3002 aconnector->edid); 3003 } 3004 3005 drm_connector_update_edid_property(connector, aconnector->edid); 3006 amdgpu_dm_update_freesync_caps(connector, aconnector->edid); 3007 update_connector_ext_caps(aconnector); 3008 } else { 3009 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 3010 amdgpu_dm_update_freesync_caps(connector, NULL); 3011 drm_connector_update_edid_property(connector, NULL); 3012 aconnector->num_modes = 0; 3013 dc_sink_release(aconnector->dc_sink); 3014 aconnector->dc_sink = NULL; 3015 aconnector->edid = NULL; 3016 #ifdef CONFIG_DRM_AMD_DC_HDCP 3017 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 3018 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 3019 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 3020 #endif 3021 } 3022 3023 mutex_unlock(&dev->mode_config.mutex); 3024 3025 update_subconnector_property(aconnector); 3026 3027 if (sink) 3028 dc_sink_release(sink); 3029 } 3030 3031 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 3032 { 3033 struct drm_connector *connector = &aconnector->base; 3034 struct drm_device *dev = connector->dev; 3035 enum dc_connection_type new_connection_type = dc_connection_none; 3036 struct amdgpu_device *adev = drm_to_adev(dev); 3037 #ifdef CONFIG_DRM_AMD_DC_HDCP 3038 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 3039 #endif 3040 3041 if (adev->dm.disable_hpd_irq) 3042 return; 3043 3044 /* 3045 * In case of failure or MST no need to update connector status or notify the OS 3046 * since (for MST case) MST does this in its own context. 
3047 */ 3048 mutex_lock(&aconnector->hpd_lock); 3049 3050 #ifdef CONFIG_DRM_AMD_DC_HDCP 3051 if (adev->dm.hdcp_workqueue) { 3052 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 3053 dm_con_state->update_hdcp = true; 3054 } 3055 #endif 3056 if (aconnector->fake_enable) 3057 aconnector->fake_enable = false; 3058 3059 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 3060 DRM_ERROR("KMS: Failed to detect connector\n"); 3061 3062 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3063 emulated_link_detect(aconnector->dc_link); 3064 3065 drm_modeset_lock_all(dev); 3066 dm_restore_drm_connector_state(dev, connector); 3067 drm_modeset_unlock_all(dev); 3068 3069 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3070 drm_kms_helper_connector_hotplug_event(connector); 3071 3072 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { 3073 amdgpu_dm_update_connector_after_detect(aconnector); 3074 3075 drm_modeset_lock_all(dev); 3076 dm_restore_drm_connector_state(dev, connector); 3077 drm_modeset_unlock_all(dev); 3078 3079 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 3080 drm_kms_helper_connector_hotplug_event(connector); 3081 } 3082 mutex_unlock(&aconnector->hpd_lock); 3083 3084 } 3085 3086 static void handle_hpd_irq(void *param) 3087 { 3088 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3089 3090 handle_hpd_irq_helper(aconnector); 3091 3092 } 3093 3094 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) 3095 { 3096 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; 3097 uint8_t dret; 3098 bool new_irq_handled = false; 3099 int dpcd_addr; 3100 int dpcd_bytes_to_read; 3101 3102 const int max_process_count = 30; 3103 int process_count = 0; 3104 3105 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); 3106 3107 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { 3108 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; 3109 /* DPCD 0x200 - 0x201 for downstream IRQ */ 3110 dpcd_addr = DP_SINK_COUNT; 3111 } else { 3112 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; 3113 /* DPCD 0x2002 - 0x2005 for downstream IRQ */ 3114 dpcd_addr = DP_SINK_COUNT_ESI; 3115 } 3116 3117 dret = drm_dp_dpcd_read( 3118 &aconnector->dm_dp_aux.aux, 3119 dpcd_addr, 3120 esi, 3121 dpcd_bytes_to_read); 3122 3123 while (dret == dpcd_bytes_to_read && 3124 process_count < max_process_count) { 3125 uint8_t retry; 3126 dret = 0; 3127 3128 process_count++; 3129 3130 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); 3131 /* handle HPD short pulse irq */ 3132 if (aconnector->mst_mgr.mst_state) 3133 drm_dp_mst_hpd_irq( 3134 &aconnector->mst_mgr, 3135 esi, 3136 &new_irq_handled); 3137 3138 if (new_irq_handled) { 3139 /* ACK at DPCD to notify down stream */ 3140 const int ack_dpcd_bytes_to_write = 3141 dpcd_bytes_to_read - 1; 3142 3143 for (retry = 0; retry < 3; retry++) { 3144 uint8_t wret; 3145 3146 wret = drm_dp_dpcd_write( 3147 &aconnector->dm_dp_aux.aux, 3148 dpcd_addr + 1, 3149 &esi[1], 3150 ack_dpcd_bytes_to_write); 3151 if (wret == ack_dpcd_bytes_to_write) 3152 break; 3153 } 3154 3155 /* check if there is new irq to be handled */ 3156 dret = drm_dp_dpcd_read( 3157 &aconnector->dm_dp_aux.aux, 3158 dpcd_addr, 3159 esi, 3160 dpcd_bytes_to_read); 3161 3162 new_irq_handled = false; 3163 } else { 3164 break; 3165 } 3166 } 3167 3168 if (process_count == max_process_count) 3169 DRM_DEBUG_DRIVER("Loop exceeded max 
iterations\n"); 3170 } 3171 3172 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3173 union hpd_irq_data hpd_irq_data) 3174 { 3175 struct hpd_rx_irq_offload_work *offload_work = 3176 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3177 3178 if (!offload_work) { 3179 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3180 return; 3181 } 3182 3183 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3184 offload_work->data = hpd_irq_data; 3185 offload_work->offload_wq = offload_wq; 3186 3187 queue_work(offload_wq->wq, &offload_work->work); 3188 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3189 } 3190 3191 static void handle_hpd_rx_irq(void *param) 3192 { 3193 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3194 struct drm_connector *connector = &aconnector->base; 3195 struct drm_device *dev = connector->dev; 3196 struct dc_link *dc_link = aconnector->dc_link; 3197 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3198 bool result = false; 3199 enum dc_connection_type new_connection_type = dc_connection_none; 3200 struct amdgpu_device *adev = drm_to_adev(dev); 3201 union hpd_irq_data hpd_irq_data; 3202 bool link_loss = false; 3203 bool has_left_work = false; 3204 int idx = aconnector->base.index; 3205 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3206 3207 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3208 3209 if (adev->dm.disable_hpd_irq) 3210 return; 3211 3212 /* 3213 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3214 * conflict, after implement i2c helper, this mutex should be 3215 * retired. 3216 */ 3217 mutex_lock(&aconnector->hpd_lock); 3218 3219 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3220 &link_loss, true, &has_left_work); 3221 3222 if (!has_left_work) 3223 goto out; 3224 3225 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3226 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3227 goto out; 3228 } 3229 3230 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3231 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3232 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3233 dm_handle_mst_sideband_msg(aconnector); 3234 goto out; 3235 } 3236 3237 if (link_loss) { 3238 bool skip = false; 3239 3240 spin_lock(&offload_wq->offload_lock); 3241 skip = offload_wq->is_handling_link_loss; 3242 3243 if (!skip) 3244 offload_wq->is_handling_link_loss = true; 3245 3246 spin_unlock(&offload_wq->offload_lock); 3247 3248 if (!skip) 3249 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3250 3251 goto out; 3252 } 3253 } 3254 3255 out: 3256 if (result && !is_mst_root_connector) { 3257 /* Downstream Port status changed. 
*/ 3258 if (!dc_link_detect_sink(dc_link, &new_connection_type)) 3259 DRM_ERROR("KMS: Failed to detect connector\n"); 3260 3261 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3262 emulated_link_detect(dc_link); 3263 3264 if (aconnector->fake_enable) 3265 aconnector->fake_enable = false; 3266 3267 amdgpu_dm_update_connector_after_detect(aconnector); 3268 3269 3270 drm_modeset_lock_all(dev); 3271 dm_restore_drm_connector_state(dev, connector); 3272 drm_modeset_unlock_all(dev); 3273 3274 drm_kms_helper_connector_hotplug_event(connector); 3275 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 3276 3277 if (aconnector->fake_enable) 3278 aconnector->fake_enable = false; 3279 3280 amdgpu_dm_update_connector_after_detect(aconnector); 3281 3282 3283 drm_modeset_lock_all(dev); 3284 dm_restore_drm_connector_state(dev, connector); 3285 drm_modeset_unlock_all(dev); 3286 3287 drm_kms_helper_connector_hotplug_event(connector); 3288 } 3289 } 3290 #ifdef CONFIG_DRM_AMD_DC_HDCP 3291 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3292 if (adev->dm.hdcp_workqueue) 3293 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3294 } 3295 #endif 3296 3297 if (dc_link->type != dc_connection_mst_branch) 3298 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3299 3300 mutex_unlock(&aconnector->hpd_lock); 3301 } 3302 3303 static void register_hpd_handlers(struct amdgpu_device *adev) 3304 { 3305 struct drm_device *dev = adev_to_drm(adev); 3306 struct drm_connector *connector; 3307 struct amdgpu_dm_connector *aconnector; 3308 const struct dc_link *dc_link; 3309 struct dc_interrupt_params int_params = {0}; 3310 3311 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3312 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3313 3314 list_for_each_entry(connector, 3315 &dev->mode_config.connector_list, head) { 3316 3317 aconnector = to_amdgpu_dm_connector(connector); 3318 dc_link = aconnector->dc_link; 3319 3320 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { 3321 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3322 int_params.irq_source = dc_link->irq_source_hpd; 3323 3324 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3325 handle_hpd_irq, 3326 (void *) aconnector); 3327 } 3328 3329 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { 3330 3331 /* Also register for DP short pulse (hpd_rx). */ 3332 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3333 int_params.irq_source = dc_link->irq_source_hpd_rx; 3334 3335 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3336 handle_hpd_rx_irq, 3337 (void *) aconnector); 3338 3339 if (adev->dm.hpd_rx_offload_wq) 3340 adev->dm.hpd_rx_offload_wq[connector->index].aconnector = 3341 aconnector; 3342 } 3343 } 3344 } 3345 3346 #if defined(CONFIG_DRM_AMD_DC_SI) 3347 /* Register IRQ sources and initialize IRQ callbacks */ 3348 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3349 { 3350 struct dc *dc = adev->dm.dc; 3351 struct common_irq_params *c_irq_params; 3352 struct dc_interrupt_params int_params = {0}; 3353 int r; 3354 int i; 3355 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3356 3357 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3358 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3359 3360 /* 3361 * Actions of amdgpu_irq_add_id(): 3362 * 1. Register a set() function with base driver. 3363 * Base driver will call set() function to enable/disable an 3364 * interrupt in DC hardware. 3365 * 2. Register amdgpu_dm_irq_handler(). 
3366 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3367 * coming from DC hardware. 3368 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3369 * for acknowledging and handling. */ 3370 3371 /* Use VBLANK interrupt */ 3372 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3373 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq); 3374 if (r) { 3375 DRM_ERROR("Failed to add crtc irq id!\n"); 3376 return r; 3377 } 3378 3379 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3380 int_params.irq_source = 3381 dc_interrupt_to_irq_source(dc, i+1 , 0); 3382 3383 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3384 3385 c_irq_params->adev = adev; 3386 c_irq_params->irq_src = int_params.irq_source; 3387 3388 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3389 dm_crtc_high_irq, c_irq_params); 3390 } 3391 3392 /* Use GRPH_PFLIP interrupt */ 3393 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3394 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3395 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3396 if (r) { 3397 DRM_ERROR("Failed to add page flip irq id!\n"); 3398 return r; 3399 } 3400 3401 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3402 int_params.irq_source = 3403 dc_interrupt_to_irq_source(dc, i, 0); 3404 3405 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3406 3407 c_irq_params->adev = adev; 3408 c_irq_params->irq_src = int_params.irq_source; 3409 3410 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3411 dm_pflip_high_irq, c_irq_params); 3412 3413 } 3414 3415 /* HPD */ 3416 r = amdgpu_irq_add_id(adev, client_id, 3417 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3418 if (r) { 3419 DRM_ERROR("Failed to add hpd irq id!\n"); 3420 return r; 3421 } 3422 3423 register_hpd_handlers(adev); 3424 3425 return 0; 3426 } 3427 #endif 3428 3429 /* Register IRQ sources and initialize IRQ callbacks */ 3430 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 3431 { 3432 struct dc *dc = adev->dm.dc; 3433 struct common_irq_params *c_irq_params; 3434 struct dc_interrupt_params int_params = {0}; 3435 int r; 3436 int i; 3437 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3438 3439 if (adev->family >= AMDGPU_FAMILY_AI) 3440 client_id = SOC15_IH_CLIENTID_DCE; 3441 3442 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3443 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3444 3445 /* 3446 * Actions of amdgpu_irq_add_id(): 3447 * 1. Register a set() function with base driver. 3448 * Base driver will call set() function to enable/disable an 3449 * interrupt in DC hardware. 3450 * 2. Register amdgpu_dm_irq_handler(). 3451 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3452 * coming from DC hardware. 3453 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3454 * for acknowledging and handling. 
*/ 3455 3456 /* Use VBLANK interrupt */ 3457 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 3458 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 3459 if (r) { 3460 DRM_ERROR("Failed to add crtc irq id!\n"); 3461 return r; 3462 } 3463 3464 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3465 int_params.irq_source = 3466 dc_interrupt_to_irq_source(dc, i, 0); 3467 3468 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3469 3470 c_irq_params->adev = adev; 3471 c_irq_params->irq_src = int_params.irq_source; 3472 3473 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3474 dm_crtc_high_irq, c_irq_params); 3475 } 3476 3477 /* Use VUPDATE interrupt */ 3478 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 3479 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 3480 if (r) { 3481 DRM_ERROR("Failed to add vupdate irq id!\n"); 3482 return r; 3483 } 3484 3485 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3486 int_params.irq_source = 3487 dc_interrupt_to_irq_source(dc, i, 0); 3488 3489 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3490 3491 c_irq_params->adev = adev; 3492 c_irq_params->irq_src = int_params.irq_source; 3493 3494 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3495 dm_vupdate_high_irq, c_irq_params); 3496 } 3497 3498 /* Use GRPH_PFLIP interrupt */ 3499 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3500 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3501 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3502 if (r) { 3503 DRM_ERROR("Failed to add page flip irq id!\n"); 3504 return r; 3505 } 3506 3507 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3508 int_params.irq_source = 3509 dc_interrupt_to_irq_source(dc, i, 0); 3510 3511 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3512 3513 c_irq_params->adev = adev; 3514 c_irq_params->irq_src = int_params.irq_source; 3515 3516 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3517 dm_pflip_high_irq, c_irq_params); 3518 3519 } 3520 3521 /* HPD */ 3522 r = amdgpu_irq_add_id(adev, client_id, 3523 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3524 if (r) { 3525 DRM_ERROR("Failed to add hpd irq id!\n"); 3526 return r; 3527 } 3528 3529 register_hpd_handlers(adev); 3530 3531 return 0; 3532 } 3533 3534 /* Register IRQ sources and initialize IRQ callbacks */ 3535 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 3536 { 3537 struct dc *dc = adev->dm.dc; 3538 struct common_irq_params *c_irq_params; 3539 struct dc_interrupt_params int_params = {0}; 3540 int r; 3541 int i; 3542 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3543 static const unsigned int vrtl_int_srcid[] = { 3544 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 3545 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 3546 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 3547 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 3548 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 3549 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 3550 }; 3551 #endif 3552 3553 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3554 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3555 3556 /* 3557 * Actions of amdgpu_irq_add_id(): 3558 * 1. Register a set() function with base driver. 
3559 * Base driver will call set() function to enable/disable an 3560 * interrupt in DC hardware. 3561 * 2. Register amdgpu_dm_irq_handler(). 3562 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3563 * coming from DC hardware. 3564 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3565 * for acknowledging and handling. 3566 */ 3567 3568 /* Use VSTARTUP interrupt */ 3569 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 3570 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 3571 i++) { 3572 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 3573 3574 if (r) { 3575 DRM_ERROR("Failed to add crtc irq id!\n"); 3576 return r; 3577 } 3578 3579 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3580 int_params.irq_source = 3581 dc_interrupt_to_irq_source(dc, i, 0); 3582 3583 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3584 3585 c_irq_params->adev = adev; 3586 c_irq_params->irq_src = int_params.irq_source; 3587 3588 amdgpu_dm_irq_register_interrupt( 3589 adev, &int_params, dm_crtc_high_irq, c_irq_params); 3590 } 3591 3592 /* Use otg vertical line interrupt */ 3593 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3594 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 3595 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 3596 vrtl_int_srcid[i], &adev->vline0_irq); 3597 3598 if (r) { 3599 DRM_ERROR("Failed to add vline0 irq id!\n"); 3600 return r; 3601 } 3602 3603 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3604 int_params.irq_source = 3605 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 3606 3607 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { 3608 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); 3609 break; 3610 } 3611 3612 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 3613 - DC_IRQ_SOURCE_DC1_VLINE0]; 3614 3615 c_irq_params->adev = adev; 3616 c_irq_params->irq_src = int_params.irq_source; 3617 3618 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3619 dm_dcn_vertical_interrupt0_high_irq, c_irq_params); 3620 } 3621 #endif 3622 3623 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 3624 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 3625 * to trigger at end of each vblank, regardless of state of the lock, 3626 * matching DCE behaviour. 
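 * (DM mainly relies on this source for vblank accounting and FreeSync/VRR
 * handling while variable refresh is active; see dm_vupdate_high_irq.)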
3627 */ 3628 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 3629 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 3630 i++) { 3631 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 3632 3633 if (r) { 3634 DRM_ERROR("Failed to add vupdate irq id!\n"); 3635 return r; 3636 } 3637 3638 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3639 int_params.irq_source = 3640 dc_interrupt_to_irq_source(dc, i, 0); 3641 3642 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3643 3644 c_irq_params->adev = adev; 3645 c_irq_params->irq_src = int_params.irq_source; 3646 3647 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3648 dm_vupdate_high_irq, c_irq_params); 3649 } 3650 3651 /* Use GRPH_PFLIP interrupt */ 3652 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 3653 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; 3654 i++) { 3655 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 3656 if (r) { 3657 DRM_ERROR("Failed to add page flip irq id!\n"); 3658 return r; 3659 } 3660 3661 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3662 int_params.irq_source = 3663 dc_interrupt_to_irq_source(dc, i, 0); 3664 3665 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3666 3667 c_irq_params->adev = adev; 3668 c_irq_params->irq_src = int_params.irq_source; 3669 3670 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3671 dm_pflip_high_irq, c_irq_params); 3672 3673 } 3674 3675 /* HPD */ 3676 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3677 &adev->hpd_irq); 3678 if (r) { 3679 DRM_ERROR("Failed to add hpd irq id!\n"); 3680 return r; 3681 } 3682 3683 register_hpd_handlers(adev); 3684 3685 return 0; 3686 } 3687 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3688 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3689 { 3690 struct dc *dc = adev->dm.dc; 3691 struct common_irq_params *c_irq_params; 3692 struct dc_interrupt_params int_params = {0}; 3693 int r, i; 3694 3695 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3696 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3697 3698 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3699 &adev->dmub_outbox_irq); 3700 if (r) { 3701 DRM_ERROR("Failed to add outbox irq id!\n"); 3702 return r; 3703 } 3704 3705 if (dc->ctx->dmub_srv) { 3706 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3707 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3708 int_params.irq_source = 3709 dc_interrupt_to_irq_source(dc, i, 0); 3710 3711 c_irq_params = &adev->dm.dmub_outbox_params[0]; 3712 3713 c_irq_params->adev = adev; 3714 c_irq_params->irq_src = int_params.irq_source; 3715 3716 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3717 dm_dmub_outbox1_low_irq, c_irq_params); 3718 } 3719 3720 return 0; 3721 } 3722 3723 /* 3724 * Acquires the lock for the atomic state object and returns 3725 * the new atomic state. 3726 * 3727 * This should only be called during atomic check. 
3728 */ 3729 int dm_atomic_get_state(struct drm_atomic_state *state, 3730 struct dm_atomic_state **dm_state) 3731 { 3732 struct drm_device *dev = state->dev; 3733 struct amdgpu_device *adev = drm_to_adev(dev); 3734 struct amdgpu_display_manager *dm = &adev->dm; 3735 struct drm_private_state *priv_state; 3736 3737 if (*dm_state) 3738 return 0; 3739 3740 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 3741 if (IS_ERR(priv_state)) 3742 return PTR_ERR(priv_state); 3743 3744 *dm_state = to_dm_atomic_state(priv_state); 3745 3746 return 0; 3747 } 3748 3749 static struct dm_atomic_state * 3750 dm_atomic_get_new_state(struct drm_atomic_state *state) 3751 { 3752 struct drm_device *dev = state->dev; 3753 struct amdgpu_device *adev = drm_to_adev(dev); 3754 struct amdgpu_display_manager *dm = &adev->dm; 3755 struct drm_private_obj *obj; 3756 struct drm_private_state *new_obj_state; 3757 int i; 3758 3759 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 3760 if (obj->funcs == dm->atomic_obj.funcs) 3761 return to_dm_atomic_state(new_obj_state); 3762 } 3763 3764 return NULL; 3765 } 3766 3767 static struct drm_private_state * 3768 dm_atomic_duplicate_state(struct drm_private_obj *obj) 3769 { 3770 struct dm_atomic_state *old_state, *new_state; 3771 3772 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 3773 if (!new_state) 3774 return NULL; 3775 3776 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 3777 3778 old_state = to_dm_atomic_state(obj->state); 3779 3780 if (old_state && old_state->context) 3781 new_state->context = dc_copy_state(old_state->context); 3782 3783 if (!new_state->context) { 3784 kfree(new_state); 3785 return NULL; 3786 } 3787 3788 return &new_state->base; 3789 } 3790 3791 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 3792 struct drm_private_state *state) 3793 { 3794 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 3795 3796 if (dm_state && dm_state->context) 3797 dc_release_state(dm_state->context); 3798 3799 kfree(dm_state); 3800 } 3801 3802 static struct drm_private_state_funcs dm_atomic_state_funcs = { 3803 .atomic_duplicate_state = dm_atomic_duplicate_state, 3804 .atomic_destroy_state = dm_atomic_destroy_state, 3805 }; 3806 3807 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 3808 { 3809 struct dm_atomic_state *state; 3810 int r; 3811 3812 adev->mode_info.mode_config_initialized = true; 3813 3814 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 3815 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 3816 3817 adev_to_drm(adev)->mode_config.max_width = 16384; 3818 adev_to_drm(adev)->mode_config.max_height = 16384; 3819 3820 adev_to_drm(adev)->mode_config.preferred_depth = 24; 3821 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 3822 /* indicates support for immediate flip */ 3823 adev_to_drm(adev)->mode_config.async_page_flip = true; 3824 3825 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; 3826 3827 state = kzalloc(sizeof(*state), GFP_KERNEL); 3828 if (!state) 3829 return -ENOMEM; 3830 3831 state->context = dc_create_state(adev->dm.dc); 3832 if (!state->context) { 3833 kfree(state); 3834 return -ENOMEM; 3835 } 3836 3837 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 3838 3839 drm_atomic_private_obj_init(adev_to_drm(adev), 3840 &adev->dm.atomic_obj, 3841 &state->base, 3842 &dm_atomic_state_funcs); 3843 3844 r = amdgpu_display_modeset_create_props(adev); 3845 if (r) { 3846 
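/* Property creation failed: drop the DC state constructed above before returning the error. */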
dc_release_state(state->context); 3847 kfree(state); 3848 return r; 3849 } 3850 3851 r = amdgpu_dm_audio_init(adev); 3852 if (r) { 3853 dc_release_state(state->context); 3854 kfree(state); 3855 return r; 3856 } 3857 3858 return 0; 3859 } 3860 3861 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 3862 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 3863 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 3864 3865 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 3866 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 3867 3868 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 3869 int bl_idx) 3870 { 3871 #if defined(CONFIG_ACPI) 3872 struct amdgpu_dm_backlight_caps caps; 3873 3874 memset(&caps, 0, sizeof(caps)); 3875 3876 if (dm->backlight_caps[bl_idx].caps_valid) 3877 return; 3878 3879 amdgpu_acpi_get_backlight_caps(&caps); 3880 if (caps.caps_valid) { 3881 dm->backlight_caps[bl_idx].caps_valid = true; 3882 if (caps.aux_support) 3883 return; 3884 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 3885 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 3886 } else { 3887 dm->backlight_caps[bl_idx].min_input_signal = 3888 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 3889 dm->backlight_caps[bl_idx].max_input_signal = 3890 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 3891 } 3892 #else 3893 if (dm->backlight_caps[bl_idx].aux_support) 3894 return; 3895 3896 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 3897 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 3898 #endif 3899 } 3900 3901 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 3902 unsigned *min, unsigned *max) 3903 { 3904 if (!caps) 3905 return 0; 3906 3907 if (caps->aux_support) { 3908 // Firmware limits are in nits, DC API wants millinits. 3909 *max = 1000 * caps->aux_max_input_signal; 3910 *min = 1000 * caps->aux_min_input_signal; 3911 } else { 3912 // Firmware limits are 8-bit, PWM control is 16-bit. 
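// Multiplying by 0x101 (257) maps the 8-bit firmware range onto the full 16-bit PWM range: 255 * 257 == 65535.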
3913 *max = 0x101 * caps->max_input_signal; 3914 *min = 0x101 * caps->min_input_signal; 3915 } 3916 return 1; 3917 } 3918 3919 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 3920 uint32_t brightness) 3921 { 3922 unsigned min, max; 3923 3924 if (!get_brightness_range(caps, &min, &max)) 3925 return brightness; 3926 3927 // Rescale 0..255 to min..max 3928 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 3929 AMDGPU_MAX_BL_LEVEL); 3930 } 3931 3932 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 3933 uint32_t brightness) 3934 { 3935 unsigned min, max; 3936 3937 if (!get_brightness_range(caps, &min, &max)) 3938 return brightness; 3939 3940 if (brightness < min) 3941 return 0; 3942 // Rescale min..max to 0..255 3943 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 3944 max - min); 3945 } 3946 3947 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 3948 int bl_idx, 3949 u32 user_brightness) 3950 { 3951 struct amdgpu_dm_backlight_caps caps; 3952 struct dc_link *link; 3953 u32 brightness; 3954 bool rc; 3955 3956 amdgpu_dm_update_backlight_caps(dm, bl_idx); 3957 caps = dm->backlight_caps[bl_idx]; 3958 3959 dm->brightness[bl_idx] = user_brightness; 3960 /* update scratch register */ 3961 if (bl_idx == 0) 3962 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); 3963 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 3964 link = (struct dc_link *)dm->backlight_link[bl_idx]; 3965 3966 /* Change brightness based on AUX property */ 3967 if (caps.aux_support) { 3968 rc = dc_link_set_backlight_level_nits(link, true, brightness, 3969 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 3970 if (!rc) 3971 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 3972 } else { 3973 rc = dc_link_set_backlight_level(link, brightness, 0); 3974 if (!rc) 3975 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 3976 } 3977 3978 if (rc) 3979 dm->actual_brightness[bl_idx] = user_brightness; 3980 } 3981 3982 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 3983 { 3984 struct amdgpu_display_manager *dm = bl_get_data(bd); 3985 int i; 3986 3987 for (i = 0; i < dm->num_of_edps; i++) { 3988 if (bd == dm->backlight_dev[i]) 3989 break; 3990 } 3991 if (i >= AMDGPU_DM_MAX_NUM_EDP) 3992 i = 0; 3993 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 3994 3995 return 0; 3996 } 3997 3998 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 3999 int bl_idx) 4000 { 4001 struct amdgpu_dm_backlight_caps caps; 4002 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 4003 4004 amdgpu_dm_update_backlight_caps(dm, bl_idx); 4005 caps = dm->backlight_caps[bl_idx]; 4006 4007 if (caps.aux_support) { 4008 u32 avg, peak; 4009 bool rc; 4010 4011 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 4012 if (!rc) 4013 return dm->brightness[bl_idx]; 4014 return convert_brightness_to_user(&caps, avg); 4015 } else { 4016 int ret = dc_link_get_backlight_level(link); 4017 4018 if (ret == DC_ERROR_UNEXPECTED) 4019 return dm->brightness[bl_idx]; 4020 return convert_brightness_to_user(&caps, ret); 4021 } 4022 } 4023 4024 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 4025 { 4026 struct amdgpu_display_manager *dm = bl_get_data(bd); 4027 int i; 4028 4029 for (i = 0; i < dm->num_of_edps; i++) { 4030 if (bd == dm->backlight_dev[i]) 4031 break; 4032 } 4033 if (i >= 
AMDGPU_DM_MAX_NUM_EDP) 4034 i = 0; 4035 return amdgpu_dm_backlight_get_level(dm, i); 4036 } 4037 4038 static const struct backlight_ops amdgpu_dm_backlight_ops = { 4039 .options = BL_CORE_SUSPENDRESUME, 4040 .get_brightness = amdgpu_dm_backlight_get_brightness, 4041 .update_status = amdgpu_dm_backlight_update_status, 4042 }; 4043 4044 static void 4045 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) 4046 { 4047 char bl_name[16]; 4048 struct backlight_properties props = { 0 }; 4049 4050 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); 4051 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; 4052 4053 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 4054 props.brightness = AMDGPU_MAX_BL_LEVEL; 4055 props.type = BACKLIGHT_RAW; 4056 4057 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 4058 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps); 4059 4060 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name, 4061 adev_to_drm(dm->adev)->dev, 4062 dm, 4063 &amdgpu_dm_backlight_ops, 4064 &props); 4065 4066 if (IS_ERR(dm->backlight_dev[dm->num_of_edps])) 4067 DRM_ERROR("DM: Backlight registration failed!\n"); 4068 else 4069 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 4070 } 4071 #endif 4072 4073 static int initialize_plane(struct amdgpu_display_manager *dm, 4074 struct amdgpu_mode_info *mode_info, int plane_id, 4075 enum drm_plane_type plane_type, 4076 const struct dc_plane_cap *plane_cap) 4077 { 4078 struct drm_plane *plane; 4079 unsigned long possible_crtcs; 4080 int ret = 0; 4081 4082 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 4083 if (!plane) { 4084 DRM_ERROR("KMS: Failed to allocate plane\n"); 4085 return -ENOMEM; 4086 } 4087 plane->type = plane_type; 4088 4089 /* 4090 * HACK: IGT tests expect that the primary plane for a CRTC 4091 * can only have one possible CRTC. Only expose support for 4092 * any CRTC if they're not going to be used as a primary plane 4093 * for a CRTC - like overlay or underlay planes. 4094 */ 4095 possible_crtcs = 1 << plane_id; 4096 if (plane_id >= dm->dc->caps.max_streams) 4097 possible_crtcs = 0xff; 4098 4099 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 4100 4101 if (ret) { 4102 DRM_ERROR("KMS: Failed to initialize plane\n"); 4103 kfree(plane); 4104 return ret; 4105 } 4106 4107 if (mode_info) 4108 mode_info->planes[plane_id] = plane; 4109 4110 return ret; 4111 } 4112 4113 4114 static void register_backlight_device(struct amdgpu_display_manager *dm, 4115 struct dc_link *link) 4116 { 4117 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 4118 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 4119 4120 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && 4121 link->type != dc_connection_none) { 4122 /* 4123 * Even if registration failed, we should continue with 4124 * DM initialization because not having a backlight control 4125 * is better than a black screen. 4126 */ 4127 if (!dm->backlight_dev[dm->num_of_edps]) 4128 amdgpu_dm_register_backlight_device(dm); 4129 4130 if (dm->backlight_dev[dm->num_of_edps]) { 4131 dm->backlight_link[dm->num_of_edps] = link; 4132 dm->num_of_edps++; 4133 } 4134 } 4135 #endif 4136 } 4137 4138 4139 /* 4140 * In this architecture, the association 4141 * connector -> encoder -> crtc 4142 * is not really required.
The crtc and connector will hold the 4143 * display_index as an abstraction to use with DAL component 4144 * 4145 * Returns 0 on success 4146 */ 4147 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 4148 { 4149 struct amdgpu_display_manager *dm = &adev->dm; 4150 int32_t i; 4151 struct amdgpu_dm_connector *aconnector = NULL; 4152 struct amdgpu_encoder *aencoder = NULL; 4153 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4154 uint32_t link_cnt; 4155 int32_t primary_planes; 4156 enum dc_connection_type new_connection_type = dc_connection_none; 4157 const struct dc_plane_cap *plane; 4158 bool psr_feature_enabled = false; 4159 4160 dm->display_indexes_num = dm->dc->caps.max_streams; 4161 /* Update the actual used number of crtc */ 4162 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 4163 4164 link_cnt = dm->dc->caps.max_links; 4165 if (amdgpu_dm_mode_config_init(dm->adev)) { 4166 DRM_ERROR("DM: Failed to initialize mode config\n"); 4167 return -EINVAL; 4168 } 4169 4170 /* There is one primary plane per CRTC */ 4171 primary_planes = dm->dc->caps.max_streams; 4172 ASSERT(primary_planes <= AMDGPU_MAX_PLANES); 4173 4174 /* 4175 * Initialize primary planes, implicit planes for legacy IOCTLS. 4176 * Order is reversed to match iteration order in atomic check. 4177 */ 4178 for (i = (primary_planes - 1); i >= 0; i--) { 4179 plane = &dm->dc->caps.planes[i]; 4180 4181 if (initialize_plane(dm, mode_info, i, 4182 DRM_PLANE_TYPE_PRIMARY, plane)) { 4183 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 4184 goto fail; 4185 } 4186 } 4187 4188 /* 4189 * Initialize overlay planes, index starting after primary planes. 4190 * These planes have a higher DRM index than the primary planes since 4191 * they should be considered as having a higher z-order. 4192 * Order is reversed to match iteration order in atomic check. 4193 * 4194 * Only support DCN for now, and only expose one so we don't encourage 4195 * userspace to use up all the pipes. 4196 */ 4197 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4198 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4199 4200 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4201 continue; 4202 4203 if (!plane->blends_with_above || !plane->blends_with_below) 4204 continue; 4205 4206 if (!plane->pixel_format_support.argb8888) 4207 continue; 4208 4209 if (initialize_plane(dm, NULL, primary_planes + i, 4210 DRM_PLANE_TYPE_OVERLAY, plane)) { 4211 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4212 goto fail; 4213 } 4214 4215 /* Only create one overlay plane. */ 4216 break; 4217 } 4218 4219 for (i = 0; i < dm->dc->caps.max_streams; i++) 4220 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4221 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4222 goto fail; 4223 } 4224 4225 /* Use Outbox interrupt */ 4226 switch (adev->ip_versions[DCE_HWIP][0]) { 4227 case IP_VERSION(3, 0, 0): 4228 case IP_VERSION(3, 1, 2): 4229 case IP_VERSION(3, 1, 3): 4230 case IP_VERSION(3, 1, 5): 4231 case IP_VERSION(3, 1, 6): 4232 case IP_VERSION(3, 2, 0): 4233 case IP_VERSION(3, 2, 1): 4234 case IP_VERSION(2, 1, 0): 4235 if (register_outbox_irq_handlers(dm->adev)) { 4236 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4237 goto fail; 4238 } 4239 break; 4240 default: 4241 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", 4242 adev->ip_versions[DCE_HWIP][0]); 4243 } 4244 4245 /* Determine whether to enable PSR support by default. 
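 * PSR defaults to enabled on DCN 3.1.2 and newer; older IP versions fall back to the DC_PSR_MASK bit in amdgpu_dc_feature_mask, and DC_DISABLE_PSR in amdgpu_dc_debug_mask disables it in all cases.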
*/ 4246 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { 4247 switch (adev->ip_versions[DCE_HWIP][0]) { 4248 case IP_VERSION(3, 1, 2): 4249 case IP_VERSION(3, 1, 3): 4250 case IP_VERSION(3, 1, 5): 4251 case IP_VERSION(3, 1, 6): 4252 case IP_VERSION(3, 2, 0): 4253 case IP_VERSION(3, 2, 1): 4254 psr_feature_enabled = true; 4255 break; 4256 default: 4257 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; 4258 break; 4259 } 4260 } 4261 4262 /* Disable vblank IRQs aggressively for power-saving. */ 4263 adev_to_drm(adev)->vblank_disable_immediate = true; 4264 4265 /* loops over all connectors on the board */ 4266 for (i = 0; i < link_cnt; i++) { 4267 struct dc_link *link = NULL; 4268 4269 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 4270 DRM_ERROR( 4271 "KMS: Cannot support more than %d display indexes\n", 4272 AMDGPU_DM_MAX_DISPLAY_INDEX); 4273 continue; 4274 } 4275 4276 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 4277 if (!aconnector) 4278 goto fail; 4279 4280 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 4281 if (!aencoder) 4282 goto fail; 4283 4284 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 4285 DRM_ERROR("KMS: Failed to initialize encoder\n"); 4286 goto fail; 4287 } 4288 4289 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 4290 DRM_ERROR("KMS: Failed to initialize connector\n"); 4291 goto fail; 4292 } 4293 4294 link = dc_get_link_at_index(dm->dc, i); 4295 4296 if (!dc_link_detect_sink(link, &new_connection_type)) 4297 DRM_ERROR("KMS: Failed to detect connector\n"); 4298 4299 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4300 emulated_link_detect(link); 4301 amdgpu_dm_update_connector_after_detect(aconnector); 4302 4303 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { 4304 amdgpu_dm_update_connector_after_detect(aconnector); 4305 register_backlight_device(dm, link); 4306 if (dm->num_of_edps) 4307 update_connector_ext_caps(aconnector); 4308 if (psr_feature_enabled) 4309 amdgpu_dm_set_psr_caps(link); 4310 4311 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when 4312 * PSR is also supported. 4313 */ 4314 if (link->psr_settings.psr_feature_enabled) 4315 adev_to_drm(adev)->vblank_disable_immediate = false; 4316 } 4317 4318 4319 } 4320 4321 /* Software is initialized. Now we can register interrupt handlers. 
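 * SI parts use dce60_register_irq_handlers(), CIK through Vega parts use dce110_register_irq_handlers(), and DCN parts are selected by DCE_HWIP IP version and use dcn10_register_irq_handlers().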
*/ 4322 switch (adev->asic_type) { 4323 #if defined(CONFIG_DRM_AMD_DC_SI) 4324 case CHIP_TAHITI: 4325 case CHIP_PITCAIRN: 4326 case CHIP_VERDE: 4327 case CHIP_OLAND: 4328 if (dce60_register_irq_handlers(dm->adev)) { 4329 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4330 goto fail; 4331 } 4332 break; 4333 #endif 4334 case CHIP_BONAIRE: 4335 case CHIP_HAWAII: 4336 case CHIP_KAVERI: 4337 case CHIP_KABINI: 4338 case CHIP_MULLINS: 4339 case CHIP_TONGA: 4340 case CHIP_FIJI: 4341 case CHIP_CARRIZO: 4342 case CHIP_STONEY: 4343 case CHIP_POLARIS11: 4344 case CHIP_POLARIS10: 4345 case CHIP_POLARIS12: 4346 case CHIP_VEGAM: 4347 case CHIP_VEGA10: 4348 case CHIP_VEGA12: 4349 case CHIP_VEGA20: 4350 if (dce110_register_irq_handlers(dm->adev)) { 4351 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4352 goto fail; 4353 } 4354 break; 4355 default: 4356 switch (adev->ip_versions[DCE_HWIP][0]) { 4357 case IP_VERSION(1, 0, 0): 4358 case IP_VERSION(1, 0, 1): 4359 case IP_VERSION(2, 0, 2): 4360 case IP_VERSION(2, 0, 3): 4361 case IP_VERSION(2, 0, 0): 4362 case IP_VERSION(2, 1, 0): 4363 case IP_VERSION(3, 0, 0): 4364 case IP_VERSION(3, 0, 2): 4365 case IP_VERSION(3, 0, 3): 4366 case IP_VERSION(3, 0, 1): 4367 case IP_VERSION(3, 1, 2): 4368 case IP_VERSION(3, 1, 3): 4369 case IP_VERSION(3, 1, 5): 4370 case IP_VERSION(3, 1, 6): 4371 case IP_VERSION(3, 2, 0): 4372 case IP_VERSION(3, 2, 1): 4373 if (dcn10_register_irq_handlers(dm->adev)) { 4374 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4375 goto fail; 4376 } 4377 break; 4378 default: 4379 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", 4380 adev->ip_versions[DCE_HWIP][0]); 4381 goto fail; 4382 } 4383 break; 4384 } 4385 4386 return 0; 4387 fail: 4388 kfree(aencoder); 4389 kfree(aconnector); 4390 4391 return -EINVAL; 4392 } 4393 4394 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 4395 { 4396 drm_atomic_private_obj_fini(&dm->atomic_obj); 4397 return; 4398 } 4399 4400 /****************************************************************************** 4401 * amdgpu_display_funcs functions 4402 *****************************************************************************/ 4403 4404 /* 4405 * dm_bandwidth_update - program display watermarks 4406 * 4407 * @adev: amdgpu_device pointer 4408 * 4409 * Calculate and program the display watermarks and line buffer allocation. 4410 */ 4411 static void dm_bandwidth_update(struct amdgpu_device *adev) 4412 { 4413 /* TODO: implement later */ 4414 } 4415 4416 static const struct amdgpu_display_funcs dm_display_funcs = { 4417 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 4418 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 4419 .backlight_set_level = NULL, /* never called for DC */ 4420 .backlight_get_level = NULL, /* never called for DC */ 4421 .hpd_sense = NULL,/* called unconditionally */ 4422 .hpd_set_polarity = NULL, /* called unconditionally */ 4423 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 4424 .page_flip_get_scanoutpos = 4425 dm_crtc_get_scanoutpos,/* called unconditionally */ 4426 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 4427 .add_connector = NULL, /* VBIOS parsing. DAL does it. 
*/ 4428 }; 4429 4430 #if defined(CONFIG_DEBUG_KERNEL_DC) 4431 4432 static ssize_t s3_debug_store(struct device *device, 4433 struct device_attribute *attr, 4434 const char *buf, 4435 size_t count) 4436 { 4437 int ret; 4438 int s3_state; 4439 struct drm_device *drm_dev = dev_get_drvdata(device); 4440 struct amdgpu_device *adev = drm_to_adev(drm_dev); 4441 4442 ret = kstrtoint(buf, 0, &s3_state); 4443 4444 if (ret == 0) { 4445 if (s3_state) { 4446 dm_resume(adev); 4447 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 4448 } else 4449 dm_suspend(adev); 4450 } 4451 4452 return ret == 0 ? count : 0; 4453 } 4454 4455 DEVICE_ATTR_WO(s3_debug); 4456 4457 #endif 4458 4459 static int dm_early_init(void *handle) 4460 { 4461 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4462 4463 switch (adev->asic_type) { 4464 #if defined(CONFIG_DRM_AMD_DC_SI) 4465 case CHIP_TAHITI: 4466 case CHIP_PITCAIRN: 4467 case CHIP_VERDE: 4468 adev->mode_info.num_crtc = 6; 4469 adev->mode_info.num_hpd = 6; 4470 adev->mode_info.num_dig = 6; 4471 break; 4472 case CHIP_OLAND: 4473 adev->mode_info.num_crtc = 2; 4474 adev->mode_info.num_hpd = 2; 4475 adev->mode_info.num_dig = 2; 4476 break; 4477 #endif 4478 case CHIP_BONAIRE: 4479 case CHIP_HAWAII: 4480 adev->mode_info.num_crtc = 6; 4481 adev->mode_info.num_hpd = 6; 4482 adev->mode_info.num_dig = 6; 4483 break; 4484 case CHIP_KAVERI: 4485 adev->mode_info.num_crtc = 4; 4486 adev->mode_info.num_hpd = 6; 4487 adev->mode_info.num_dig = 7; 4488 break; 4489 case CHIP_KABINI: 4490 case CHIP_MULLINS: 4491 adev->mode_info.num_crtc = 2; 4492 adev->mode_info.num_hpd = 6; 4493 adev->mode_info.num_dig = 6; 4494 break; 4495 case CHIP_FIJI: 4496 case CHIP_TONGA: 4497 adev->mode_info.num_crtc = 6; 4498 adev->mode_info.num_hpd = 6; 4499 adev->mode_info.num_dig = 7; 4500 break; 4501 case CHIP_CARRIZO: 4502 adev->mode_info.num_crtc = 3; 4503 adev->mode_info.num_hpd = 6; 4504 adev->mode_info.num_dig = 9; 4505 break; 4506 case CHIP_STONEY: 4507 adev->mode_info.num_crtc = 2; 4508 adev->mode_info.num_hpd = 6; 4509 adev->mode_info.num_dig = 9; 4510 break; 4511 case CHIP_POLARIS11: 4512 case CHIP_POLARIS12: 4513 adev->mode_info.num_crtc = 5; 4514 adev->mode_info.num_hpd = 5; 4515 adev->mode_info.num_dig = 5; 4516 break; 4517 case CHIP_POLARIS10: 4518 case CHIP_VEGAM: 4519 adev->mode_info.num_crtc = 6; 4520 adev->mode_info.num_hpd = 6; 4521 adev->mode_info.num_dig = 6; 4522 break; 4523 case CHIP_VEGA10: 4524 case CHIP_VEGA12: 4525 case CHIP_VEGA20: 4526 adev->mode_info.num_crtc = 6; 4527 adev->mode_info.num_hpd = 6; 4528 adev->mode_info.num_dig = 6; 4529 break; 4530 default: 4531 4532 switch (adev->ip_versions[DCE_HWIP][0]) { 4533 case IP_VERSION(2, 0, 2): 4534 case IP_VERSION(3, 0, 0): 4535 adev->mode_info.num_crtc = 6; 4536 adev->mode_info.num_hpd = 6; 4537 adev->mode_info.num_dig = 6; 4538 break; 4539 case IP_VERSION(2, 0, 0): 4540 case IP_VERSION(3, 0, 2): 4541 adev->mode_info.num_crtc = 5; 4542 adev->mode_info.num_hpd = 5; 4543 adev->mode_info.num_dig = 5; 4544 break; 4545 case IP_VERSION(2, 0, 3): 4546 case IP_VERSION(3, 0, 3): 4547 adev->mode_info.num_crtc = 2; 4548 adev->mode_info.num_hpd = 2; 4549 adev->mode_info.num_dig = 2; 4550 break; 4551 case IP_VERSION(1, 0, 0): 4552 case IP_VERSION(1, 0, 1): 4553 case IP_VERSION(3, 0, 1): 4554 case IP_VERSION(2, 1, 0): 4555 case IP_VERSION(3, 1, 2): 4556 case IP_VERSION(3, 1, 3): 4557 case IP_VERSION(3, 1, 5): 4558 case IP_VERSION(3, 1, 6): 4559 case IP_VERSION(3, 2, 0): 4560 case IP_VERSION(3, 2, 1): 4561 adev->mode_info.num_crtc = 4; 4562 
adev->mode_info.num_hpd = 4; 4563 adev->mode_info.num_dig = 4; 4564 break; 4565 default: 4566 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", 4567 adev->ip_versions[DCE_HWIP][0]); 4568 return -EINVAL; 4569 } 4570 break; 4571 } 4572 4573 amdgpu_dm_set_irq_funcs(adev); 4574 4575 if (adev->mode_info.funcs == NULL) 4576 adev->mode_info.funcs = &dm_display_funcs; 4577 4578 /* 4579 * Note: Do NOT change adev->audio_endpt_rreg and 4580 * adev->audio_endpt_wreg because they are initialised in 4581 * amdgpu_device_init() 4582 */ 4583 #if defined(CONFIG_DEBUG_KERNEL_DC) 4584 device_create_file( 4585 adev_to_drm(adev)->dev, 4586 &dev_attr_s3_debug); 4587 #endif 4588 4589 return 0; 4590 } 4591 4592 static bool modeset_required(struct drm_crtc_state *crtc_state, 4593 struct dc_stream_state *new_stream, 4594 struct dc_stream_state *old_stream) 4595 { 4596 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4597 } 4598 4599 static bool modereset_required(struct drm_crtc_state *crtc_state) 4600 { 4601 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4602 } 4603 4604 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4605 { 4606 drm_encoder_cleanup(encoder); 4607 kfree(encoder); 4608 } 4609 4610 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4611 .destroy = amdgpu_dm_encoder_destroy, 4612 }; 4613 4614 4615 static void get_min_max_dc_plane_scaling(struct drm_device *dev, 4616 struct drm_framebuffer *fb, 4617 int *min_downscale, int *max_upscale) 4618 { 4619 struct amdgpu_device *adev = drm_to_adev(dev); 4620 struct dc *dc = adev->dm.dc; 4621 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 4622 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 4623 4624 switch (fb->format->format) { 4625 case DRM_FORMAT_P010: 4626 case DRM_FORMAT_NV12: 4627 case DRM_FORMAT_NV21: 4628 *max_upscale = plane_cap->max_upscale_factor.nv12; 4629 *min_downscale = plane_cap->max_downscale_factor.nv12; 4630 break; 4631 4632 case DRM_FORMAT_XRGB16161616F: 4633 case DRM_FORMAT_ARGB16161616F: 4634 case DRM_FORMAT_XBGR16161616F: 4635 case DRM_FORMAT_ABGR16161616F: 4636 *max_upscale = plane_cap->max_upscale_factor.fp16; 4637 *min_downscale = plane_cap->max_downscale_factor.fp16; 4638 break; 4639 4640 default: 4641 *max_upscale = plane_cap->max_upscale_factor.argb8888; 4642 *min_downscale = plane_cap->max_downscale_factor.argb8888; 4643 break; 4644 } 4645 4646 /* 4647 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 4648 * scaling factor of 1.0 == 1000 units. 4649 */ 4650 if (*max_upscale == 1) 4651 *max_upscale = 1000; 4652 4653 if (*min_downscale == 1) 4654 *min_downscale = 1000; 4655 } 4656 4657 4658 static int fill_dc_scaling_info(struct amdgpu_device *adev, 4659 const struct drm_plane_state *state, 4660 struct dc_scaling_info *scaling_info) 4661 { 4662 int scale_w, scale_h, min_downscale, max_upscale; 4663 4664 memset(scaling_info, 0, sizeof(*scaling_info)); 4665 4666 /* Source is fixed 16.16 but we ignore mantissa for now... */ 4667 scaling_info->src_rect.x = state->src_x >> 16; 4668 scaling_info->src_rect.y = state->src_y >> 16; 4669 4670 /* 4671 * For reasons we don't (yet) fully understand a non-zero 4672 * src_y coordinate into an NV12 buffer can cause a 4673 * system hang on DCN1x. 4674 * To avoid hangs (and maybe be overly cautious) 4675 * let's reject both non-zero src_x and src_y. 
4676 * 4677 * We currently know of only one use-case to reproduce a 4678 * scenario with non-zero src_x and src_y for NV12, which 4679 * is to gesture the YouTube Android app into full screen 4680 * on ChromeOS. 4681 */ 4682 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || 4683 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && 4684 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && 4685 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) 4686 return -EINVAL; 4687 4688 scaling_info->src_rect.width = state->src_w >> 16; 4689 if (scaling_info->src_rect.width == 0) 4690 return -EINVAL; 4691 4692 scaling_info->src_rect.height = state->src_h >> 16; 4693 if (scaling_info->src_rect.height == 0) 4694 return -EINVAL; 4695 4696 scaling_info->dst_rect.x = state->crtc_x; 4697 scaling_info->dst_rect.y = state->crtc_y; 4698 4699 if (state->crtc_w == 0) 4700 return -EINVAL; 4701 4702 scaling_info->dst_rect.width = state->crtc_w; 4703 4704 if (state->crtc_h == 0) 4705 return -EINVAL; 4706 4707 scaling_info->dst_rect.height = state->crtc_h; 4708 4709 /* DRM doesn't specify clipping on destination output. */ 4710 scaling_info->clip_rect = scaling_info->dst_rect; 4711 4712 /* Validate scaling per-format with DC plane caps */ 4713 if (state->plane && state->plane->dev && state->fb) { 4714 get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 4715 &min_downscale, &max_upscale); 4716 } else { 4717 min_downscale = 250; 4718 max_upscale = 16000; 4719 } 4720 4721 scale_w = scaling_info->dst_rect.width * 1000 / 4722 scaling_info->src_rect.width; 4723 4724 if (scale_w < min_downscale || scale_w > max_upscale) 4725 return -EINVAL; 4726 4727 scale_h = scaling_info->dst_rect.height * 1000 / 4728 scaling_info->src_rect.height; 4729 4730 if (scale_h < min_downscale || scale_h > max_upscale) 4731 return -EINVAL; 4732 4733 /* 4734 * The "scaling_quality" can be ignored for now, quality = 0 has DC 4735 * assume reasonable defaults based on the format. 
4736 */ 4737 4738 return 0; 4739 } 4740 4741 static void 4742 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, 4743 uint64_t tiling_flags) 4744 { 4745 /* Fill GFX8 params */ 4746 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 4747 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 4748 4749 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 4750 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 4751 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 4752 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 4753 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 4754 4755 /* XXX fix me for VI */ 4756 tiling_info->gfx8.num_banks = num_banks; 4757 tiling_info->gfx8.array_mode = 4758 DC_ARRAY_2D_TILED_THIN1; 4759 tiling_info->gfx8.tile_split = tile_split; 4760 tiling_info->gfx8.bank_width = bankw; 4761 tiling_info->gfx8.bank_height = bankh; 4762 tiling_info->gfx8.tile_aspect = mtaspect; 4763 tiling_info->gfx8.tile_mode = 4764 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 4765 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 4766 == DC_ARRAY_1D_TILED_THIN1) { 4767 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 4768 } 4769 4770 tiling_info->gfx8.pipe_config = 4771 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 4772 } 4773 4774 static void 4775 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 4776 union dc_tiling_info *tiling_info) 4777 { 4778 tiling_info->gfx9.num_pipes = 4779 adev->gfx.config.gb_addr_config_fields.num_pipes; 4780 tiling_info->gfx9.num_banks = 4781 adev->gfx.config.gb_addr_config_fields.num_banks; 4782 tiling_info->gfx9.pipe_interleave = 4783 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 4784 tiling_info->gfx9.num_shader_engines = 4785 adev->gfx.config.gb_addr_config_fields.num_se; 4786 tiling_info->gfx9.max_compressed_frags = 4787 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 4788 tiling_info->gfx9.num_rb_per_se = 4789 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 4790 tiling_info->gfx9.shaderEnable = 1; 4791 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 4792 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 4793 } 4794 4795 static int 4796 validate_dcc(struct amdgpu_device *adev, 4797 const enum surface_pixel_format format, 4798 const enum dc_rotation_angle rotation, 4799 const union dc_tiling_info *tiling_info, 4800 const struct dc_plane_dcc_param *dcc, 4801 const struct dc_plane_address *address, 4802 const struct plane_size *plane_size) 4803 { 4804 struct dc *dc = adev->dm.dc; 4805 struct dc_dcc_surface_param input; 4806 struct dc_surface_dcc_cap output; 4807 4808 memset(&input, 0, sizeof(input)); 4809 memset(&output, 0, sizeof(output)); 4810 4811 if (!dcc->enable) 4812 return 0; 4813 4814 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || 4815 !dc->cap_funcs.get_dcc_compression_cap) 4816 return -EINVAL; 4817 4818 input.format = format; 4819 input.surface_size.width = plane_size->surface_size.width; 4820 input.surface_size.height = plane_size->surface_size.height; 4821 input.swizzle_mode = tiling_info->gfx9.swizzle; 4822 4823 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 4824 input.scan = SCAN_DIRECTION_HORIZONTAL; 4825 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 4826 input.scan = SCAN_DIRECTION_VERTICAL; 4827 4828 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 4829 return -EINVAL; 4830 4831 if (!output.capable) 4832 return 
-EINVAL; 4833 4834 if (dcc->independent_64b_blks == 0 && 4835 output.grph.rgb.independent_64b_blks != 0) 4836 return -EINVAL; 4837 4838 return 0; 4839 } 4840 4841 static bool 4842 modifier_has_dcc(uint64_t modifier) 4843 { 4844 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 4845 } 4846 4847 static unsigned 4848 modifier_gfx9_swizzle_mode(uint64_t modifier) 4849 { 4850 if (modifier == DRM_FORMAT_MOD_LINEAR) 4851 return 0; 4852 4853 return AMD_FMT_MOD_GET(TILE, modifier); 4854 } 4855 4856 static const struct drm_format_info * 4857 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 4858 { 4859 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 4860 } 4861 4862 static void 4863 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 4864 union dc_tiling_info *tiling_info, 4865 uint64_t modifier) 4866 { 4867 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 4868 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 4869 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 4870 unsigned int pipes_log2; 4871 4872 pipes_log2 = min(5u, mod_pipe_xor_bits); 4873 4874 fill_gfx9_tiling_info_from_device(adev, tiling_info); 4875 4876 if (!IS_AMD_FMT_MOD(modifier)) 4877 return; 4878 4879 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 4880 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 4881 4882 if (adev->family >= AMDGPU_FAMILY_NV) { 4883 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 4884 } else { 4885 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 4886 4887 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ 4888 } 4889 } 4890 4891 enum dm_micro_swizzle { 4892 MICRO_SWIZZLE_Z = 0, 4893 MICRO_SWIZZLE_S = 1, 4894 MICRO_SWIZZLE_D = 2, 4895 MICRO_SWIZZLE_R = 3 4896 }; 4897 4898 static bool dm_plane_format_mod_supported(struct drm_plane *plane, 4899 uint32_t format, 4900 uint64_t modifier) 4901 { 4902 struct amdgpu_device *adev = drm_to_adev(plane->dev); 4903 const struct drm_format_info *info = drm_format_info(format); 4904 struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id; 4905 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; 4906 4907 if (!info) 4908 return false; 4909 4910 /* 4911 * We always have to allow these modifiers: 4912 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 4913 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 
4914 */ 4915 if (modifier == DRM_FORMAT_MOD_LINEAR || 4916 modifier == DRM_FORMAT_MOD_INVALID) { 4917 return true; 4918 } 4919 4920 /* check if swizzle mode is supported by this version of DCN */ 4921 switch (asic_id.chip_family) { 4922 case FAMILY_SI: 4923 case FAMILY_CI: 4924 case FAMILY_KV: 4925 case FAMILY_CZ: 4926 case FAMILY_VI: 4927 /* asics before AI does not have modifier support */ 4928 return false; 4929 break; 4930 case FAMILY_AI: 4931 case FAMILY_RV: 4932 case FAMILY_NV: 4933 case FAMILY_VGH: 4934 case FAMILY_YELLOW_CARP: 4935 case AMDGPU_FAMILY_GC_10_3_6: 4936 case AMDGPU_FAMILY_GC_10_3_7: 4937 switch (AMD_FMT_MOD_GET(TILE, modifier)) { 4938 case AMD_FMT_MOD_TILE_GFX9_64K_R_X: 4939 case AMD_FMT_MOD_TILE_GFX9_64K_D_X: 4940 case AMD_FMT_MOD_TILE_GFX9_64K_S_X: 4941 case AMD_FMT_MOD_TILE_GFX9_64K_D: 4942 return true; 4943 break; 4944 default: 4945 return false; 4946 break; 4947 } 4948 break; 4949 case AMDGPU_FAMILY_GC_11_0_0: 4950 switch (AMD_FMT_MOD_GET(TILE, modifier)) { 4951 case AMD_FMT_MOD_TILE_GFX11_256K_R_X: 4952 case AMD_FMT_MOD_TILE_GFX9_64K_R_X: 4953 case AMD_FMT_MOD_TILE_GFX9_64K_D_X: 4954 case AMD_FMT_MOD_TILE_GFX9_64K_S_X: 4955 case AMD_FMT_MOD_TILE_GFX9_64K_D: 4956 return true; 4957 break; 4958 default: 4959 return false; 4960 break; 4961 } 4962 break; 4963 default: 4964 ASSERT(0); /* Unknown asic */ 4965 break; 4966 } 4967 4968 /* 4969 * For D swizzle the canonical modifier depends on the bpp, so check 4970 * it here. 4971 */ 4972 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 4973 adev->family >= AMDGPU_FAMILY_NV) { 4974 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 4975 return false; 4976 } 4977 4978 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 4979 info->cpp[0] < 8) 4980 return false; 4981 4982 if (modifier_has_dcc(modifier)) { 4983 /* Per radeonsi comments 16/64 bpp are more complicated. */ 4984 if (info->cpp[0] != 4) 4985 return false; 4986 /* We support multi-planar formats, but not when combined with 4987 * additional DCC metadata planes. 
*/ 4988 if (info->num_planes > 1) 4989 return false; 4990 } 4991 4992 return true; 4993 } 4994 4995 static void 4996 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 4997 { 4998 if (!*mods) 4999 return; 5000 5001 if (*cap - *size < 1) { 5002 uint64_t new_cap = *cap * 2; 5003 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 5004 5005 if (!new_mods) { 5006 kfree(*mods); 5007 *mods = NULL; 5008 return; 5009 } 5010 5011 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 5012 kfree(*mods); 5013 *mods = new_mods; 5014 *cap = new_cap; 5015 } 5016 5017 (*mods)[*size] = mod; 5018 *size += 1; 5019 } 5020 5021 static void 5022 add_gfx9_modifiers(const struct amdgpu_device *adev, 5023 uint64_t **mods, uint64_t *size, uint64_t *capacity) 5024 { 5025 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 5026 int pipe_xor_bits = min(8, pipes + 5027 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 5028 int bank_xor_bits = min(8 - pipe_xor_bits, 5029 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 5030 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 5031 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 5032 5033 5034 if (adev->family == AMDGPU_FAMILY_RV) { 5035 /* Raven2 and later */ 5036 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 5037 5038 /* 5039 * No _D DCC swizzles yet because we only allow 32bpp, which 5040 * doesn't support _D on DCN 5041 */ 5042 5043 if (has_constant_encode) { 5044 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5045 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5046 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5047 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5048 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5049 AMD_FMT_MOD_SET(DCC, 1) | 5050 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5051 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5052 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 5053 } 5054 5055 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5056 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5057 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5058 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5059 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5060 AMD_FMT_MOD_SET(DCC, 1) | 5061 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5062 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5063 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 5064 5065 if (has_constant_encode) { 5066 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5067 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5068 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5069 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5070 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5071 AMD_FMT_MOD_SET(DCC, 1) | 5072 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5073 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5074 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5075 5076 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5077 AMD_FMT_MOD_SET(RB, rb) | 5078 AMD_FMT_MOD_SET(PIPE, pipes)); 5079 } 5080 5081 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5082 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5083 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5084 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5085 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 5086 AMD_FMT_MOD_SET(DCC, 1) | 5087 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5088 
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5089 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 5090 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 5091 AMD_FMT_MOD_SET(RB, rb) | 5092 AMD_FMT_MOD_SET(PIPE, pipes)); 5093 } 5094 5095 /* 5096 * Only supported for 64bpp on Raven, will be filtered on format in 5097 * dm_plane_format_mod_supported. 5098 */ 5099 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5100 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 5101 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5102 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5103 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 5104 5105 if (adev->family == AMDGPU_FAMILY_RV) { 5106 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5107 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5108 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 5109 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5110 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 5111 } 5112 5113 /* 5114 * Only supported for 64bpp on Raven, will be filtered on format in 5115 * dm_plane_format_mod_supported. 5116 */ 5117 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5118 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5119 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5120 5121 if (adev->family == AMDGPU_FAMILY_RV) { 5122 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5123 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5124 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5125 } 5126 } 5127 5128 static void 5129 add_gfx10_1_modifiers(const struct amdgpu_device *adev, 5130 uint64_t **mods, uint64_t *size, uint64_t *capacity) 5131 { 5132 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 5133 5134 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5135 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5136 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5137 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5138 AMD_FMT_MOD_SET(DCC, 1) | 5139 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5140 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5141 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5142 5143 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5144 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5145 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5146 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5147 AMD_FMT_MOD_SET(DCC, 1) | 5148 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5149 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5150 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5151 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5152 5153 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5154 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5155 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5156 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 5157 5158 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5159 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5160 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 5161 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 5162 5163 5164 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 5165 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5166 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5167 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5168 5169 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5170 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5171 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5172 } 5173 5174 static void 5175 add_gfx10_3_modifiers(const struct amdgpu_device *adev, 5176 uint64_t **mods, uint64_t *size, uint64_t *capacity) 5177 { 5178 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 5179 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 5180 5181 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5182 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5183 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5184 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5185 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5186 AMD_FMT_MOD_SET(DCC, 1) | 5187 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5188 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5189 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5190 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5191 5192 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5193 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5194 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5195 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5196 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5197 AMD_FMT_MOD_SET(DCC, 1) | 5198 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5199 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5200 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 5201 5202 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5203 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5204 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5205 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5206 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5207 AMD_FMT_MOD_SET(DCC, 1) | 5208 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5209 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5210 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5211 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5212 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 5213 5214 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5215 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5216 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5217 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5218 AMD_FMT_MOD_SET(PACKERS, pkrs) | 5219 AMD_FMT_MOD_SET(DCC, 1) | 5220 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 5221 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 5222 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5223 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 5224 5225 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5226 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 5227 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5228 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5229 AMD_FMT_MOD_SET(PACKERS, pkrs)); 5230 5231 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5232 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5233 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5234 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5235 AMD_FMT_MOD_SET(PACKERS, pkrs)); 5236 5237 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 5238 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5239 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5240 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5241 5242 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5243 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5244 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5245 } 5246 5247 static void 5248 add_gfx11_modifiers(struct 
amdgpu_device *adev, 5249 uint64_t **mods, uint64_t *size, uint64_t *capacity) 5250 { 5251 int num_pipes = 0; 5252 int pipe_xor_bits = 0; 5253 int num_pkrs = 0; 5254 int pkrs = 0; 5255 u32 gb_addr_config; 5256 unsigned swizzle_r_x; 5257 uint64_t modifier_r_x; 5258 uint64_t modifier_dcc_best; 5259 uint64_t modifier_dcc_4k; 5260 5261 /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from 5262 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */ 5263 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); 5264 ASSERT(gb_addr_config != 0); 5265 5266 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); 5267 pkrs = ilog2(num_pkrs); 5268 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); 5269 pipe_xor_bits = ilog2(num_pipes); 5270 5271 /* R_X swizzle modes are the best for rendering and DCC requires them. */ 5272 swizzle_r_x = num_pipes > 16 ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : 5273 AMD_FMT_MOD_TILE_GFX9_64K_R_X; 5274 5275 modifier_r_x = AMD_FMT_MOD | 5276 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 5277 AMD_FMT_MOD_SET(TILE, swizzle_r_x) | 5278 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5279 AMD_FMT_MOD_SET(PACKERS, pkrs); 5280 5281 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ 5282 modifier_dcc_best = modifier_r_x | 5283 AMD_FMT_MOD_SET(DCC, 1) | 5284 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | 5285 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5286 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); 5287 5288 /* DCC settings for 4K and greater resolutions. (required by display hw) */ 5289 modifier_dcc_4k = modifier_r_x | 5290 AMD_FMT_MOD_SET(DCC, 1) | 5291 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 5292 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 5293 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); 5294 5295 add_modifier(mods, size, capacity, modifier_dcc_best); 5296 add_modifier(mods, size, capacity, modifier_dcc_4k); 5297 5298 add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 5299 add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); 5300 5301 add_modifier(mods, size, capacity, modifier_r_x); 5302 5303 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5304 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | 5305 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); 5306 } 5307 5308 static int 5309 get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 5310 { 5311 uint64_t size = 0, capacity = 128; 5312 *mods = NULL; 5313 5314 /* We have not hooked up any pre-GFX9 modifiers. */ 5315 if (adev->family < AMDGPU_FAMILY_AI) 5316 return 0; 5317 5318 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 5319 5320 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 5321 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 5322 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 5323 return *mods ?
0 : -ENOMEM; 5324 } 5325 5326 switch (adev->family) { 5327 case AMDGPU_FAMILY_AI: 5328 case AMDGPU_FAMILY_RV: 5329 add_gfx9_modifiers(adev, mods, &size, &capacity); 5330 break; 5331 case AMDGPU_FAMILY_NV: 5332 case AMDGPU_FAMILY_VGH: 5333 case AMDGPU_FAMILY_YC: 5334 case AMDGPU_FAMILY_GC_10_3_6: 5335 case AMDGPU_FAMILY_GC_10_3_7: 5336 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) 5337 add_gfx10_3_modifiers(adev, mods, &size, &capacity); 5338 else 5339 add_gfx10_1_modifiers(adev, mods, &size, &capacity); 5340 break; 5341 case AMDGPU_FAMILY_GC_11_0_0: 5342 add_gfx11_modifiers(adev, mods, &size, &capacity); 5343 break; 5344 } 5345 5346 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 5347 5348 /* INVALID marks the end of the list. */ 5349 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 5350 5351 if (!*mods) 5352 return -ENOMEM; 5353 5354 return 0; 5355 } 5356 5357 static int 5358 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 5359 const struct amdgpu_framebuffer *afb, 5360 const enum surface_pixel_format format, 5361 const enum dc_rotation_angle rotation, 5362 const struct plane_size *plane_size, 5363 union dc_tiling_info *tiling_info, 5364 struct dc_plane_dcc_param *dcc, 5365 struct dc_plane_address *address, 5366 const bool force_disable_dcc) 5367 { 5368 const uint64_t modifier = afb->base.modifier; 5369 int ret = 0; 5370 5371 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 5372 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); 5373 5374 if (modifier_has_dcc(modifier) && !force_disable_dcc) { 5375 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 5376 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 5377 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); 5378 5379 dcc->enable = 1; 5380 dcc->meta_pitch = afb->base.pitches[1]; 5381 dcc->independent_64b_blks = independent_64b_blks; 5382 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { 5383 if (independent_64b_blks && independent_128b_blks) 5384 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; 5385 else if (independent_128b_blks) 5386 dcc->dcc_ind_blk = hubp_ind_block_128b; 5387 else if (independent_64b_blks && !independent_128b_blks) 5388 dcc->dcc_ind_blk = hubp_ind_block_64b; 5389 else 5390 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 5391 } else { 5392 if (independent_64b_blks) 5393 dcc->dcc_ind_blk = hubp_ind_block_64b; 5394 else 5395 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 5396 } 5397 5398 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 5399 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 5400 } 5401 5402 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 5403 if (ret) 5404 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); 5405 5406 return ret; 5407 } 5408 5409 static int 5410 fill_plane_buffer_attributes(struct amdgpu_device *adev, 5411 const struct amdgpu_framebuffer *afb, 5412 const enum surface_pixel_format format, 5413 const enum dc_rotation_angle rotation, 5414 const uint64_t tiling_flags, 5415 union dc_tiling_info *tiling_info, 5416 struct plane_size *plane_size, 5417 struct dc_plane_dcc_param *dcc, 5418 struct dc_plane_address *address, 5419 bool tmz_surface, 5420 bool force_disable_dcc) 5421 { 5422 const struct drm_framebuffer *fb = &afb->base; 5423 int ret; 5424 5425 memset(tiling_info, 0, sizeof(*tiling_info)); 5426 memset(plane_size, 0, 
sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}

static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *pre_multiplied_alpha,
			       bool *global_alpha, int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*pre_multiplied_alpha = true;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
	    plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}

		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
			*pre_multiplied_alpha = false;
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space
*color_space) 5535 { 5536 bool full_range; 5537 5538 *color_space = COLOR_SPACE_SRGB; 5539 5540 /* DRM color properties only affect non-RGB formats. */ 5541 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5542 return 0; 5543 5544 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5545 5546 switch (plane_state->color_encoding) { 5547 case DRM_COLOR_YCBCR_BT601: 5548 if (full_range) 5549 *color_space = COLOR_SPACE_YCBCR601; 5550 else 5551 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5552 break; 5553 5554 case DRM_COLOR_YCBCR_BT709: 5555 if (full_range) 5556 *color_space = COLOR_SPACE_YCBCR709; 5557 else 5558 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5559 break; 5560 5561 case DRM_COLOR_YCBCR_BT2020: 5562 if (full_range) 5563 *color_space = COLOR_SPACE_2020_YCBCR; 5564 else 5565 return -EINVAL; 5566 break; 5567 5568 default: 5569 return -EINVAL; 5570 } 5571 5572 return 0; 5573 } 5574 5575 static int 5576 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5577 const struct drm_plane_state *plane_state, 5578 const uint64_t tiling_flags, 5579 struct dc_plane_info *plane_info, 5580 struct dc_plane_address *address, 5581 bool tmz_surface, 5582 bool force_disable_dcc) 5583 { 5584 const struct drm_framebuffer *fb = plane_state->fb; 5585 const struct amdgpu_framebuffer *afb = 5586 to_amdgpu_framebuffer(plane_state->fb); 5587 int ret; 5588 5589 memset(plane_info, 0, sizeof(*plane_info)); 5590 5591 switch (fb->format->format) { 5592 case DRM_FORMAT_C8: 5593 plane_info->format = 5594 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5595 break; 5596 case DRM_FORMAT_RGB565: 5597 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5598 break; 5599 case DRM_FORMAT_XRGB8888: 5600 case DRM_FORMAT_ARGB8888: 5601 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5602 break; 5603 case DRM_FORMAT_XRGB2101010: 5604 case DRM_FORMAT_ARGB2101010: 5605 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5606 break; 5607 case DRM_FORMAT_XBGR2101010: 5608 case DRM_FORMAT_ABGR2101010: 5609 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5610 break; 5611 case DRM_FORMAT_XBGR8888: 5612 case DRM_FORMAT_ABGR8888: 5613 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5614 break; 5615 case DRM_FORMAT_NV21: 5616 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5617 break; 5618 case DRM_FORMAT_NV12: 5619 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5620 break; 5621 case DRM_FORMAT_P010: 5622 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5623 break; 5624 case DRM_FORMAT_XRGB16161616F: 5625 case DRM_FORMAT_ARGB16161616F: 5626 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5627 break; 5628 case DRM_FORMAT_XBGR16161616F: 5629 case DRM_FORMAT_ABGR16161616F: 5630 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5631 break; 5632 case DRM_FORMAT_XRGB16161616: 5633 case DRM_FORMAT_ARGB16161616: 5634 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5635 break; 5636 case DRM_FORMAT_XBGR16161616: 5637 case DRM_FORMAT_ABGR16161616: 5638 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5639 break; 5640 default: 5641 DRM_ERROR( 5642 "Unsupported screen format %p4cc\n", 5643 &fb->format->format); 5644 return -EINVAL; 5645 } 5646 5647 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5648 case DRM_MODE_ROTATE_0: 5649 plane_info->rotation = ROTATION_ANGLE_0; 5650 break; 5651 case DRM_MODE_ROTATE_90: 5652 plane_info->rotation = ROTATION_ANGLE_90; 5653 break; 5654 case DRM_MODE_ROTATE_180: 
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  afb->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  afb->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
	dc_plane_state->flip_int_enabled = true;

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

/**
 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
 *
 * @plane: DRM plane containing dirty regions that need to be flushed to the
 *         eDP remote fb
 * @old_plane_state: Old state of @plane
 * @new_plane_state: New state of @plane
 * @crtc_state: New state of CRTC connected to the @plane
 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
 *
 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
 * (referred to as "damage clips" in DRM nomenclature) that require updating on
 * the eDP remote buffer. The responsibility of specifying the dirty regions is
 * amdgpu_dm's.
 *
 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
 * plane with regions that require flushing to the eDP remote buffer. In
 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
 * implicitly provide damage clips without any client support via the plane
 * bounds.
 *
 * Today, amdgpu_dm only supports the MPO and cursor use cases.
 *
 * TODO: Also enable for FB_DAMAGE_CLIPS
 */
static void fill_dc_dirty_rects(struct drm_plane *plane,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *new_plane_state,
				struct drm_crtc_state *crtc_state,
				struct dc_flip_addrs *flip_addrs)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct rect *dirty_rects = flip_addrs->dirty_rects;
	uint32_t num_clips;
	bool bb_changed;
	bool fb_changed;
	uint32_t i = 0;

	flip_addrs->dirty_rect_count = 0;

	/*
	 * Cursor plane has its own dirty rect update interface. See
	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR)
		return;

	/*
	 * Today, we only consider the MPO use case for PSR SU. If MPO is not
	 * requested and there is a plane update, do a full-frame update (FFU).
	 */
	if (!dm_crtc_state->mpo_requested) {
		dirty_rects[0].x = 0;
		dirty_rects[0].y = 0;
		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
		flip_addrs->dirty_rect_count = 1;
		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
				 new_plane_state->plane->base.id,
				 dm_crtc_state->base.mode.crtc_hdisplay,
				 dm_crtc_state->base.mode.crtc_vdisplay);
		return;
	}

	/*
	 * MPO is requested. Add the entire plane bounding box to the dirty
	 * rects if the plane is flipped to or damaged.
	 *
	 * If the plane is moved or resized, also add the old bounding box to
	 * the dirty rects.
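	 *
	 * A moved or resized plane therefore contributes two dirty rects:
	 * its new bounding box and its old one.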
5820 */ 5821 num_clips = drm_plane_get_damage_clips_count(new_plane_state); 5822 fb_changed = old_plane_state->fb->base.id != 5823 new_plane_state->fb->base.id; 5824 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || 5825 old_plane_state->crtc_y != new_plane_state->crtc_y || 5826 old_plane_state->crtc_w != new_plane_state->crtc_w || 5827 old_plane_state->crtc_h != new_plane_state->crtc_h); 5828 5829 DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", 5830 new_plane_state->plane->base.id, 5831 bb_changed, fb_changed, num_clips); 5832 5833 if (num_clips || fb_changed || bb_changed) { 5834 dirty_rects[i].x = new_plane_state->crtc_x; 5835 dirty_rects[i].y = new_plane_state->crtc_y; 5836 dirty_rects[i].width = new_plane_state->crtc_w; 5837 dirty_rects[i].height = new_plane_state->crtc_h; 5838 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", 5839 new_plane_state->plane->base.id, 5840 dirty_rects[i].x, dirty_rects[i].y, 5841 dirty_rects[i].width, dirty_rects[i].height); 5842 i += 1; 5843 } 5844 5845 /* Add old plane bounding-box if plane is moved or resized */ 5846 if (bb_changed) { 5847 dirty_rects[i].x = old_plane_state->crtc_x; 5848 dirty_rects[i].y = old_plane_state->crtc_y; 5849 dirty_rects[i].width = old_plane_state->crtc_w; 5850 dirty_rects[i].height = old_plane_state->crtc_h; 5851 DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n", 5852 old_plane_state->plane->base.id, 5853 dirty_rects[i].x, dirty_rects[i].y, 5854 dirty_rects[i].width, dirty_rects[i].height); 5855 i += 1; 5856 } 5857 5858 flip_addrs->dirty_rect_count = i; 5859 } 5860 5861 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5862 const struct dm_connector_state *dm_state, 5863 struct dc_stream_state *stream) 5864 { 5865 enum amdgpu_rmx_type rmx_type; 5866 5867 struct rect src = { 0 }; /* viewport in composition space*/ 5868 struct rect dst = { 0 }; /* stream addressable area */ 5869 5870 /* no mode. 
nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
		      dst.x, dst.y, dst.width, dst.height);
}

static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary workaround: DRM doesn't parse color depth for
		 * EDID revisions before 1.4
		 * TODO: Fix EDID parsing
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec.
*/ 5982 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; 5983 } 5984 5985 static enum dc_color_space 5986 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) 5987 { 5988 enum dc_color_space color_space = COLOR_SPACE_SRGB; 5989 5990 switch (dc_crtc_timing->pixel_encoding) { 5991 case PIXEL_ENCODING_YCBCR422: 5992 case PIXEL_ENCODING_YCBCR444: 5993 case PIXEL_ENCODING_YCBCR420: 5994 { 5995 /* 5996 * 27030khz is the separation point between HDTV and SDTV 5997 * according to HDMI spec, we use YCbCr709 and YCbCr601 5998 * respectively 5999 */ 6000 if (dc_crtc_timing->pix_clk_100hz > 270300) { 6001 if (dc_crtc_timing->flags.Y_ONLY) 6002 color_space = 6003 COLOR_SPACE_YCBCR709_LIMITED; 6004 else 6005 color_space = COLOR_SPACE_YCBCR709; 6006 } else { 6007 if (dc_crtc_timing->flags.Y_ONLY) 6008 color_space = 6009 COLOR_SPACE_YCBCR601_LIMITED; 6010 else 6011 color_space = COLOR_SPACE_YCBCR601; 6012 } 6013 6014 } 6015 break; 6016 case PIXEL_ENCODING_RGB: 6017 color_space = COLOR_SPACE_SRGB; 6018 break; 6019 6020 default: 6021 WARN_ON(1); 6022 break; 6023 } 6024 6025 return color_space; 6026 } 6027 6028 static bool adjust_colour_depth_from_display_info( 6029 struct dc_crtc_timing *timing_out, 6030 const struct drm_display_info *info) 6031 { 6032 enum dc_color_depth depth = timing_out->display_color_depth; 6033 int normalized_clk; 6034 do { 6035 normalized_clk = timing_out->pix_clk_100hz / 10; 6036 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 6037 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 6038 normalized_clk /= 2; 6039 /* Adjusting pix clock following on HDMI spec based on colour depth */ 6040 switch (depth) { 6041 case COLOR_DEPTH_888: 6042 break; 6043 case COLOR_DEPTH_101010: 6044 normalized_clk = (normalized_clk * 30) / 24; 6045 break; 6046 case COLOR_DEPTH_121212: 6047 normalized_clk = (normalized_clk * 36) / 24; 6048 break; 6049 case COLOR_DEPTH_161616: 6050 normalized_clk = (normalized_clk * 48) / 24; 6051 break; 6052 default: 6053 /* The above depths are the only ones valid for HDMI. 
*/ 6054 return false; 6055 } 6056 if (normalized_clk <= info->max_tmds_clock) { 6057 timing_out->display_color_depth = depth; 6058 return true; 6059 } 6060 } while (--depth > COLOR_DEPTH_666); 6061 return false; 6062 } 6063 6064 static void fill_stream_properties_from_drm_display_mode( 6065 struct dc_stream_state *stream, 6066 const struct drm_display_mode *mode_in, 6067 const struct drm_connector *connector, 6068 const struct drm_connector_state *connector_state, 6069 const struct dc_stream_state *old_stream, 6070 int requested_bpc) 6071 { 6072 struct dc_crtc_timing *timing_out = &stream->timing; 6073 const struct drm_display_info *info = &connector->display_info; 6074 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6075 struct hdmi_vendor_infoframe hv_frame; 6076 struct hdmi_avi_infoframe avi_frame; 6077 6078 memset(&hv_frame, 0, sizeof(hv_frame)); 6079 memset(&avi_frame, 0, sizeof(avi_frame)); 6080 6081 timing_out->h_border_left = 0; 6082 timing_out->h_border_right = 0; 6083 timing_out->v_border_top = 0; 6084 timing_out->v_border_bottom = 0; 6085 /* TODO: un-hardcode */ 6086 if (drm_mode_is_420_only(info, mode_in) 6087 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6088 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6089 else if (drm_mode_is_420_also(info, mode_in) 6090 && aconnector->force_yuv420_output) 6091 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6092 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 6093 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6094 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 6095 else 6096 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 6097 6098 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 6099 timing_out->display_color_depth = convert_color_depth_from_display_info( 6100 connector, 6101 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 6102 requested_bpc); 6103 timing_out->scan_type = SCANNING_TYPE_NODATA; 6104 timing_out->hdmi_vic = 0; 6105 6106 if(old_stream) { 6107 timing_out->vic = old_stream->timing.vic; 6108 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 6109 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 6110 } else { 6111 timing_out->vic = drm_match_cea_mode(mode_in); 6112 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 6113 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 6114 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 6115 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 6116 } 6117 6118 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6119 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 6120 timing_out->vic = avi_frame.video_code; 6121 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 6122 timing_out->hdmi_vic = hv_frame.vic; 6123 } 6124 6125 if (is_freesync_video_mode(mode_in, aconnector)) { 6126 timing_out->h_addressable = mode_in->hdisplay; 6127 timing_out->h_total = mode_in->htotal; 6128 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 6129 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 6130 timing_out->v_total = mode_in->vtotal; 6131 timing_out->v_addressable = mode_in->vdisplay; 6132 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 6133 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 6134 timing_out->pix_clk_100hz = mode_in->clock * 10; 6135 } else { 6136 
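		/*
		 * Not a FreeSync video mode: program the CRTC-adjusted
		 * (crtc_*) timings rather than the raw mode values.
		 */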
timing_out->h_addressable = mode_in->crtc_hdisplay; 6137 timing_out->h_total = mode_in->crtc_htotal; 6138 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 6139 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 6140 timing_out->v_total = mode_in->crtc_vtotal; 6141 timing_out->v_addressable = mode_in->crtc_vdisplay; 6142 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 6143 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 6144 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 6145 } 6146 6147 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 6148 6149 stream->output_color_space = get_output_color_space(timing_out); 6150 6151 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 6152 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 6153 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 6154 if (!adjust_colour_depth_from_display_info(timing_out, info) && 6155 drm_mode_is_420_also(info, mode_in) && 6156 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 6157 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6158 adjust_colour_depth_from_display_info(timing_out, info); 6159 } 6160 } 6161 } 6162 6163 static void fill_audio_info(struct audio_info *audio_info, 6164 const struct drm_connector *drm_connector, 6165 const struct dc_sink *dc_sink) 6166 { 6167 int i = 0; 6168 int cea_revision = 0; 6169 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 6170 6171 audio_info->manufacture_id = edid_caps->manufacturer_id; 6172 audio_info->product_id = edid_caps->product_id; 6173 6174 cea_revision = drm_connector->display_info.cea_rev; 6175 6176 strscpy(audio_info->display_name, 6177 edid_caps->display_name, 6178 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 6179 6180 if (cea_revision >= 3) { 6181 audio_info->mode_count = edid_caps->audio_mode_count; 6182 6183 for (i = 0; i < audio_info->mode_count; ++i) { 6184 audio_info->modes[i].format_code = 6185 (enum audio_format_code) 6186 (edid_caps->audio_modes[i].format_code); 6187 audio_info->modes[i].channel_count = 6188 edid_caps->audio_modes[i].channel_count; 6189 audio_info->modes[i].sample_rates.all = 6190 edid_caps->audio_modes[i].sample_rate; 6191 audio_info->modes[i].sample_size = 6192 edid_caps->audio_modes[i].sample_size; 6193 } 6194 } 6195 6196 audio_info->flags.all = edid_caps->speaker_flags; 6197 6198 /* TODO: We only check for the progressive mode, check for interlace mode too */ 6199 if (drm_connector->latency_present[0]) { 6200 audio_info->video_latency = drm_connector->video_latency[0]; 6201 audio_info->audio_latency = drm_connector->audio_latency[0]; 6202 } 6203 6204 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 6205 6206 } 6207 6208 static void 6209 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 6210 struct drm_display_mode *dst_mode) 6211 { 6212 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 6213 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 6214 dst_mode->crtc_clock = src_mode->crtc_clock; 6215 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 6216 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 6217 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 6218 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 6219 dst_mode->crtc_htotal = src_mode->crtc_htotal; 6220 dst_mode->crtc_hskew = src_mode->crtc_hskew; 6221 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 6222 dst_mode->crtc_vblank_end = 
src_mode->crtc_vblank_end; 6223 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 6224 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 6225 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 6226 } 6227 6228 static void 6229 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 6230 const struct drm_display_mode *native_mode, 6231 bool scale_enabled) 6232 { 6233 if (scale_enabled) { 6234 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6235 } else if (native_mode->clock == drm_mode->clock && 6236 native_mode->htotal == drm_mode->htotal && 6237 native_mode->vtotal == drm_mode->vtotal) { 6238 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 6239 } else { 6240 /* no scaling nor amdgpu inserted, no need to patch */ 6241 } 6242 } 6243 6244 static struct dc_sink * 6245 create_fake_sink(struct amdgpu_dm_connector *aconnector) 6246 { 6247 struct dc_sink_init_data sink_init_data = { 0 }; 6248 struct dc_sink *sink = NULL; 6249 sink_init_data.link = aconnector->dc_link; 6250 sink_init_data.sink_signal = aconnector->dc_link->connector_signal; 6251 6252 sink = dc_sink_create(&sink_init_data); 6253 if (!sink) { 6254 DRM_ERROR("Failed to create sink!\n"); 6255 return NULL; 6256 } 6257 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 6258 6259 return sink; 6260 } 6261 6262 static void set_multisync_trigger_params( 6263 struct dc_stream_state *stream) 6264 { 6265 struct dc_stream_state *master = NULL; 6266 6267 if (stream->triggered_crtc_reset.enabled) { 6268 master = stream->triggered_crtc_reset.event_source; 6269 stream->triggered_crtc_reset.event = 6270 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 6271 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING; 6272 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL; 6273 } 6274 } 6275 6276 static void set_master_stream(struct dc_stream_state *stream_set[], 6277 int stream_count) 6278 { 6279 int j, highest_rfr = 0, master_stream = 0; 6280 6281 for (j = 0; j < stream_count; j++) { 6282 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 6283 int refresh_rate = 0; 6284 6285 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/ 6286 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); 6287 if (refresh_rate > highest_rfr) { 6288 highest_rfr = refresh_rate; 6289 master_stream = j; 6290 } 6291 } 6292 } 6293 for (j = 0; j < stream_count; j++) { 6294 if (stream_set[j]) 6295 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 6296 } 6297 } 6298 6299 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) 6300 { 6301 int i = 0; 6302 struct dc_stream_state *stream; 6303 6304 if (context->stream_count < 2) 6305 return; 6306 for (i = 0; i < context->stream_count ; i++) { 6307 if (!context->streams[i]) 6308 continue; 6309 /* 6310 * TODO: add a function to read AMD VSDB bits and set 6311 * crtc_sync_master.multi_sync_enabled flag 6312 * For now it's set to false 6313 */ 6314 } 6315 6316 set_master_stream(context->streams, context->stream_count); 6317 6318 for (i = 0; i < context->stream_count ; i++) { 6319 stream = context->streams[i]; 6320 6321 if (!stream) 6322 continue; 6323 6324 set_multisync_trigger_params(stream); 6325 } 6326 } 6327 6328 #if defined(CONFIG_DRM_AMD_DC_DCN) 6329 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6330 struct dc_sink *sink, struct dc_stream_state *stream, 6331 struct dsc_dec_dpcd_caps *dsc_caps) 6332 { 6333 stream->timing.flags.DSC = 0; 6334 dsc_caps->is_dsc_supported = false; 6335 6336 if 
(aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || 6337 sink->sink_signal == SIGNAL_TYPE_EDP)) { 6338 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || 6339 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) 6340 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 6341 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 6342 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 6343 dsc_caps); 6344 } 6345 } 6346 6347 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6348 struct dc_sink *sink, struct dc_stream_state *stream, 6349 struct dsc_dec_dpcd_caps *dsc_caps, 6350 uint32_t max_dsc_target_bpp_limit_override) 6351 { 6352 const struct dc_link_settings *verified_link_cap = NULL; 6353 uint32_t link_bw_in_kbps; 6354 uint32_t edp_min_bpp_x16, edp_max_bpp_x16; 6355 struct dc *dc = sink->ctx->dc; 6356 struct dc_dsc_bw_range bw_range = {0}; 6357 struct dc_dsc_config dsc_cfg = {0}; 6358 6359 verified_link_cap = dc_link_get_link_cap(stream->link); 6360 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); 6361 edp_min_bpp_x16 = 8 * 16; 6362 edp_max_bpp_x16 = 8 * 16; 6363 6364 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) 6365 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; 6366 6367 if (edp_max_bpp_x16 < edp_min_bpp_x16) 6368 edp_min_bpp_x16 = edp_max_bpp_x16; 6369 6370 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], 6371 dc->debug.dsc_min_slice_height_override, 6372 edp_min_bpp_x16, edp_max_bpp_x16, 6373 dsc_caps, 6374 &stream->timing, 6375 &bw_range)) { 6376 6377 if (bw_range.max_kbps < link_bw_in_kbps) { 6378 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6379 dsc_caps, 6380 dc->debug.dsc_min_slice_height_override, 6381 max_dsc_target_bpp_limit_override, 6382 0, 6383 &stream->timing, 6384 &dsc_cfg)) { 6385 stream->timing.dsc_cfg = dsc_cfg; 6386 stream->timing.flags.DSC = 1; 6387 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; 6388 } 6389 return; 6390 } 6391 } 6392 6393 if (dc_dsc_compute_config(dc->res_pool->dscs[0], 6394 dsc_caps, 6395 dc->debug.dsc_min_slice_height_override, 6396 max_dsc_target_bpp_limit_override, 6397 link_bw_in_kbps, 6398 &stream->timing, 6399 &dsc_cfg)) { 6400 stream->timing.dsc_cfg = dsc_cfg; 6401 stream->timing.flags.DSC = 1; 6402 } 6403 } 6404 6405 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6406 struct dc_sink *sink, struct dc_stream_state *stream, 6407 struct dsc_dec_dpcd_caps *dsc_caps) 6408 { 6409 struct drm_connector *drm_connector = &aconnector->base; 6410 uint32_t link_bandwidth_kbps; 6411 uint32_t max_dsc_target_bpp_limit_override = 0; 6412 struct dc *dc = sink->ctx->dc; 6413 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps; 6414 uint32_t dsc_max_supported_bw_in_kbps; 6415 6416 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 6417 dc_link_get_link_cap(aconnector->dc_link)); 6418 6419 if (stream->link && stream->link->local_sink) 6420 max_dsc_target_bpp_limit_override = 6421 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; 6422 6423 /* Set DSC policy according to dsc_clock_en */ 6424 dc_dsc_policy_set_enable_dsc_when_not_needed( 6425 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 6426 6427 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp && 6428 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { 6429 6430 
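		/*
		 * DSC-capable eDP panel: use the eDP-specific policy above,
		 * which picks a DSC config from the panel's max bits-per-pixel
		 * and the available link bandwidth.
		 */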
		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);

	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  max_dsc_target_bpp_limit_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg)) {
				stream->timing.flags.DSC = 1;
				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
						 __func__, drm_connector->name);
			}
		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
			max_supported_bw_in_kbps = link_bandwidth_kbps;
			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;

			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
			    max_supported_bw_in_kbps > 0 &&
			    dsc_max_supported_bw_in_kbps > 0)
				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
							  dsc_caps,
							  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
							  max_dsc_target_bpp_limit_override,
							  dsc_max_supported_bw_in_kbps,
							  &stream->timing,
							  &stream->timing.dsc_cfg)) {
					stream->timing.flags.DSC = 1;
					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
							 __func__, drm_connector->name);
				}
		}
	}

	/* Overwrite the stream flag if DSC is enabled through debugfs */
	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
		stream->timing.flags.DSC = 1;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
#endif /* CONFIG_DRM_AMD_DC_DCN */

/**
 * DOC: FreeSync Video
 *
 * When a userspace application wants to play a video, the content follows a
 * standard format definition that usually specifies the FPS for that format.
 * The list below illustrates some video formats and their expected FPS:
 *
 * - TV/NTSC (23.976 FPS)
 * - Cinema (24 FPS)
 * - TV/PAL (25 FPS)
 * - TV/NTSC (29.97 FPS)
 * - TV/NTSC (30 FPS)
 * - Cinema HFR (48 FPS)
 * - TV/PAL (50 FPS)
 * - Commonly used (60 FPS)
 * - Multiples of 24 (48, 72, 96, 120 FPS)
 *
 * The list of standard video formats is not huge and can be added to the
 * connector's mode list beforehand. With that, userspace can leverage
 * FreeSync to extend the front porch in order to attain the target refresh
 * rate. Such a switch will happen seamlessly, without screen blanking or
 * reprogramming of the output in any other way. If userspace requests a
 * modesetting change compatible with FreeSync modes that only differ in the
 * refresh rate, DC will skip the full update and avoid a blink during the
 * transition.
For example, the video player can change the modesetting from 6508 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without 6509 * causing any display blink. This same concept can be applied to a mode 6510 * setting change. 6511 */ 6512 static struct drm_display_mode * 6513 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, 6514 bool use_probed_modes) 6515 { 6516 struct drm_display_mode *m, *m_pref = NULL; 6517 u16 current_refresh, highest_refresh; 6518 struct list_head *list_head = use_probed_modes ? 6519 &aconnector->base.probed_modes : 6520 &aconnector->base.modes; 6521 6522 if (aconnector->freesync_vid_base.clock != 0) 6523 return &aconnector->freesync_vid_base; 6524 6525 /* Find the preferred mode */ 6526 list_for_each_entry (m, list_head, head) { 6527 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6528 m_pref = m; 6529 break; 6530 } 6531 } 6532 6533 if (!m_pref) { 6534 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6535 m_pref = list_first_entry_or_null( 6536 &aconnector->base.modes, struct drm_display_mode, head); 6537 if (!m_pref) { 6538 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 6539 return NULL; 6540 } 6541 } 6542 6543 highest_refresh = drm_mode_vrefresh(m_pref); 6544 6545 /* 6546 * Find the mode with highest refresh rate with same resolution. 6547 * For some monitors, preferred mode is not the mode with highest 6548 * supported refresh rate. 6549 */ 6550 list_for_each_entry (m, list_head, head) { 6551 current_refresh = drm_mode_vrefresh(m); 6552 6553 if (m->hdisplay == m_pref->hdisplay && 6554 m->vdisplay == m_pref->vdisplay && 6555 highest_refresh < current_refresh) { 6556 highest_refresh = current_refresh; 6557 m_pref = m; 6558 } 6559 } 6560 6561 drm_mode_copy(&aconnector->freesync_vid_base, m_pref); 6562 return m_pref; 6563 } 6564 6565 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6566 struct amdgpu_dm_connector *aconnector) 6567 { 6568 struct drm_display_mode *high_mode; 6569 int timing_diff; 6570 6571 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6572 if (!high_mode || !mode) 6573 return false; 6574 6575 timing_diff = high_mode->vtotal - mode->vtotal; 6576 6577 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6578 high_mode->hdisplay != mode->hdisplay || 6579 high_mode->vdisplay != mode->vdisplay || 6580 high_mode->hsync_start != mode->hsync_start || 6581 high_mode->hsync_end != mode->hsync_end || 6582 high_mode->htotal != mode->htotal || 6583 high_mode->hskew != mode->hskew || 6584 high_mode->vscan != mode->vscan || 6585 high_mode->vsync_start - mode->vsync_start != timing_diff || 6586 high_mode->vsync_end - mode->vsync_end != timing_diff) 6587 return false; 6588 else 6589 return true; 6590 } 6591 6592 static struct dc_stream_state * 6593 create_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6594 const struct drm_display_mode *drm_mode, 6595 const struct dm_connector_state *dm_state, 6596 const struct dc_stream_state *old_stream, 6597 int requested_bpc) 6598 { 6599 struct drm_display_mode *preferred_mode = NULL; 6600 struct drm_connector *drm_connector; 6601 const struct drm_connector_state *con_state = 6602 dm_state ? &dm_state->base : NULL; 6603 struct dc_stream_state *stream = NULL; 6604 struct drm_display_mode mode = *drm_mode; 6605 struct drm_display_mode saved_mode; 6606 struct drm_display_mode *freesync_mode = NULL; 6607 bool native_mode_found = false; 6608 bool recalculate_timing = false; 6609 bool scale = dm_state ? 
(dm_state->scaling != RMX_OFF) : false; 6610 int mode_refresh; 6611 int preferred_refresh = 0; 6612 #if defined(CONFIG_DRM_AMD_DC_DCN) 6613 struct dsc_dec_dpcd_caps dsc_caps; 6614 #endif 6615 struct dc_sink *sink = NULL; 6616 6617 memset(&saved_mode, 0, sizeof(saved_mode)); 6618 6619 if (aconnector == NULL) { 6620 DRM_ERROR("aconnector is NULL!\n"); 6621 return stream; 6622 } 6623 6624 drm_connector = &aconnector->base; 6625 6626 if (!aconnector->dc_sink) { 6627 sink = create_fake_sink(aconnector); 6628 if (!sink) 6629 return stream; 6630 } else { 6631 sink = aconnector->dc_sink; 6632 dc_sink_retain(sink); 6633 } 6634 6635 stream = dc_create_stream_for_sink(sink); 6636 6637 if (stream == NULL) { 6638 DRM_ERROR("Failed to create stream for sink!\n"); 6639 goto finish; 6640 } 6641 6642 stream->dm_stream_context = aconnector; 6643 6644 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6645 drm_connector->display_info.hdmi.scdc.scrambling.low_rates; 6646 6647 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 6648 /* Search for preferred mode */ 6649 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6650 native_mode_found = true; 6651 break; 6652 } 6653 } 6654 if (!native_mode_found) 6655 preferred_mode = list_first_entry_or_null( 6656 &aconnector->base.modes, 6657 struct drm_display_mode, 6658 head); 6659 6660 mode_refresh = drm_mode_vrefresh(&mode); 6661 6662 if (preferred_mode == NULL) { 6663 /* 6664 * This may not be an error, the use case is when we have no 6665 * usermode calls to reset and set mode upon hotplug. In this 6666 * case, we call set mode ourselves to restore the previous mode 6667 * and the modelist may not be filled in in time. 6668 */ 6669 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6670 } else { 6671 recalculate_timing = is_freesync_video_mode(&mode, aconnector); 6672 if (recalculate_timing) { 6673 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6674 drm_mode_copy(&saved_mode, &mode); 6675 drm_mode_copy(&mode, freesync_mode); 6676 } else { 6677 decide_crtc_timing_for_drm_display_mode( 6678 &mode, preferred_mode, scale); 6679 6680 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6681 } 6682 } 6683 6684 if (recalculate_timing) 6685 drm_mode_set_crtcinfo(&saved_mode, 0); 6686 else if (!dm_state) 6687 drm_mode_set_crtcinfo(&mode, 0); 6688 6689 /* 6690 * If scaling is enabled and refresh rate didn't change 6691 * we copy the vic and polarities of the old timings 6692 */ 6693 if (!scale || mode_refresh != preferred_refresh) 6694 fill_stream_properties_from_drm_display_mode( 6695 stream, &mode, &aconnector->base, con_state, NULL, 6696 requested_bpc); 6697 else 6698 fill_stream_properties_from_drm_display_mode( 6699 stream, &mode, &aconnector->base, con_state, old_stream, 6700 requested_bpc); 6701 6702 #if defined(CONFIG_DRM_AMD_DC_DCN) 6703 /* SST DSC determination policy */ 6704 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6705 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6706 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6707 #endif 6708 6709 update_stream_scaling_settings(&mode, dm_state, stream); 6710 6711 fill_audio_info( 6712 &stream->audio_info, 6713 drm_connector, 6714 sink); 6715 6716 update_stream_signal(stream, sink); 6717 6718 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6719 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6720 6721 if (stream->link->psr_settings.psr_feature_enabled) { 6722 // 6723 // should decide stream 
support vsc sdp colorimetry capability
		// before building vsc info packet
		//
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	state->mpo_requested = cur->mpo_requested;
	/* TODO: Duplicate dc_stream once the stream object is flattened */

	return &state->base;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif

static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ?
"en" : "dis", rc); 6836 return rc; 6837 } 6838 6839 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) 6840 { 6841 enum dc_irq_source irq_source; 6842 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6843 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6844 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); 6845 struct amdgpu_display_manager *dm = &adev->dm; 6846 struct vblank_control_work *work; 6847 int rc = 0; 6848 6849 if (enable) { 6850 /* vblank irq on -> Only need vupdate irq in vrr mode */ 6851 if (amdgpu_dm_vrr_active(acrtc_state)) 6852 rc = dm_set_vupdate_irq(crtc, true); 6853 } else { 6854 /* vblank irq off -> vupdate irq off */ 6855 rc = dm_set_vupdate_irq(crtc, false); 6856 } 6857 6858 if (rc) 6859 return rc; 6860 6861 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 6862 6863 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 6864 return -EBUSY; 6865 6866 if (amdgpu_in_reset(adev)) 6867 return 0; 6868 6869 if (dm->vblank_control_workqueue) { 6870 work = kzalloc(sizeof(*work), GFP_ATOMIC); 6871 if (!work) 6872 return -ENOMEM; 6873 6874 INIT_WORK(&work->work, vblank_control_worker); 6875 work->dm = dm; 6876 work->acrtc = acrtc; 6877 work->enable = enable; 6878 6879 if (acrtc_state->stream) { 6880 dc_stream_retain(acrtc_state->stream); 6881 work->stream = acrtc_state->stream; 6882 } 6883 6884 queue_work(dm->vblank_control_workqueue, &work->work); 6885 } 6886 6887 return 0; 6888 } 6889 6890 static int dm_enable_vblank(struct drm_crtc *crtc) 6891 { 6892 return dm_set_vblank(crtc, true); 6893 } 6894 6895 static void dm_disable_vblank(struct drm_crtc *crtc) 6896 { 6897 dm_set_vblank(crtc, false); 6898 } 6899 6900 /* Implemented only the options currently available for the driver */ 6901 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { 6902 .reset = dm_crtc_reset_state, 6903 .destroy = amdgpu_dm_crtc_destroy, 6904 .set_config = drm_atomic_helper_set_config, 6905 .page_flip = drm_atomic_helper_page_flip, 6906 .atomic_duplicate_state = dm_crtc_duplicate_state, 6907 .atomic_destroy_state = dm_crtc_destroy_state, 6908 .set_crc_source = amdgpu_dm_crtc_set_crc_source, 6909 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, 6910 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, 6911 .get_vblank_counter = amdgpu_get_vblank_counter_kms, 6912 .enable_vblank = dm_enable_vblank, 6913 .disable_vblank = dm_disable_vblank, 6914 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 6915 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 6916 .late_register = amdgpu_dm_crtc_late_register, 6917 #endif 6918 }; 6919 6920 static enum drm_connector_status 6921 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 6922 { 6923 bool connected; 6924 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6925 6926 /* 6927 * Notes: 6928 * 1. This interface is NOT called in context of HPD irq. 6929 * 2. This interface *is called* in context of user-mode ioctl. Which 6930 * makes it a bad place for *any* MST-related activity. 6931 */ 6932 6933 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6934 !aconnector->fake_enable) 6935 connected = (aconnector->dc_sink != NULL); 6936 else 6937 connected = (aconnector->base.force == DRM_FORCE_ON); 6938 6939 update_subconnector_property(aconnector); 6940 6941 return (connected ? 
connector_status_connected : 6942 connector_status_disconnected); 6943 } 6944 6945 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6946 struct drm_connector_state *connector_state, 6947 struct drm_property *property, 6948 uint64_t val) 6949 { 6950 struct drm_device *dev = connector->dev; 6951 struct amdgpu_device *adev = drm_to_adev(dev); 6952 struct dm_connector_state *dm_old_state = 6953 to_dm_connector_state(connector->state); 6954 struct dm_connector_state *dm_new_state = 6955 to_dm_connector_state(connector_state); 6956 6957 int ret = -EINVAL; 6958 6959 if (property == dev->mode_config.scaling_mode_property) { 6960 enum amdgpu_rmx_type rmx_type; 6961 6962 switch (val) { 6963 case DRM_MODE_SCALE_CENTER: 6964 rmx_type = RMX_CENTER; 6965 break; 6966 case DRM_MODE_SCALE_ASPECT: 6967 rmx_type = RMX_ASPECT; 6968 break; 6969 case DRM_MODE_SCALE_FULLSCREEN: 6970 rmx_type = RMX_FULL; 6971 break; 6972 case DRM_MODE_SCALE_NONE: 6973 default: 6974 rmx_type = RMX_OFF; 6975 break; 6976 } 6977 6978 if (dm_old_state->scaling == rmx_type) 6979 return 0; 6980 6981 dm_new_state->scaling = rmx_type; 6982 ret = 0; 6983 } else if (property == adev->mode_info.underscan_hborder_property) { 6984 dm_new_state->underscan_hborder = val; 6985 ret = 0; 6986 } else if (property == adev->mode_info.underscan_vborder_property) { 6987 dm_new_state->underscan_vborder = val; 6988 ret = 0; 6989 } else if (property == adev->mode_info.underscan_property) { 6990 dm_new_state->underscan_enable = val; 6991 ret = 0; 6992 } else if (property == adev->mode_info.abm_level_property) { 6993 dm_new_state->abm_level = val; 6994 ret = 0; 6995 } 6996 6997 return ret; 6998 } 6999 7000 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 7001 const struct drm_connector_state *state, 7002 struct drm_property *property, 7003 uint64_t *val) 7004 { 7005 struct drm_device *dev = connector->dev; 7006 struct amdgpu_device *adev = drm_to_adev(dev); 7007 struct dm_connector_state *dm_state = 7008 to_dm_connector_state(state); 7009 int ret = -EINVAL; 7010 7011 if (property == dev->mode_config.scaling_mode_property) { 7012 switch (dm_state->scaling) { 7013 case RMX_CENTER: 7014 *val = DRM_MODE_SCALE_CENTER; 7015 break; 7016 case RMX_ASPECT: 7017 *val = DRM_MODE_SCALE_ASPECT; 7018 break; 7019 case RMX_FULL: 7020 *val = DRM_MODE_SCALE_FULLSCREEN; 7021 break; 7022 case RMX_OFF: 7023 default: 7024 *val = DRM_MODE_SCALE_NONE; 7025 break; 7026 } 7027 ret = 0; 7028 } else if (property == adev->mode_info.underscan_hborder_property) { 7029 *val = dm_state->underscan_hborder; 7030 ret = 0; 7031 } else if (property == adev->mode_info.underscan_vborder_property) { 7032 *val = dm_state->underscan_vborder; 7033 ret = 0; 7034 } else if (property == adev->mode_info.underscan_property) { 7035 *val = dm_state->underscan_enable; 7036 ret = 0; 7037 } else if (property == adev->mode_info.abm_level_property) { 7038 *val = dm_state->abm_level; 7039 ret = 0; 7040 } 7041 7042 return ret; 7043 } 7044 7045 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 7046 { 7047 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 7048 7049 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 7050 } 7051 7052 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 7053 { 7054 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7055 const struct dc_link *link = aconnector->dc_link; 7056 struct amdgpu_device *adev = 
drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	int i;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
	for (i = 0; i < dm->num_of_edps; i++) {
		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
			backlight_device_unregister(dm->backlight_dev[i]);
			dm->backlight_dev[i] = NULL;
		}
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
		kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}
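
/*
 * DRM connector function table: routes core connector operations (detect,
 * property get/set, atomic state handling, (un)registration hooks) to the
 * amdgpu_dm implementations above.
 */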
7170 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 7171 .reset = amdgpu_dm_connector_funcs_reset, 7172 .detect = amdgpu_dm_connector_detect, 7173 .fill_modes = drm_helper_probe_single_connector_modes, 7174 .destroy = amdgpu_dm_connector_destroy, 7175 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 7176 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 7177 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 7178 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 7179 .late_register = amdgpu_dm_connector_late_register, 7180 .early_unregister = amdgpu_dm_connector_unregister 7181 }; 7182 7183 static int get_modes(struct drm_connector *connector) 7184 { 7185 return amdgpu_dm_connector_get_modes(connector); 7186 } 7187 7188 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 7189 { 7190 struct dc_sink_init_data init_params = { 7191 .link = aconnector->dc_link, 7192 .sink_signal = SIGNAL_TYPE_VIRTUAL 7193 }; 7194 struct edid *edid; 7195 7196 if (!aconnector->base.edid_blob_ptr) { 7197 DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n", 7198 aconnector->base.name); 7199 7200 aconnector->base.force = DRM_FORCE_OFF; 7201 aconnector->base.override_edid = false; 7202 return; 7203 } 7204 7205 edid = (struct edid *) aconnector->base.edid_blob_ptr->data; 7206 7207 aconnector->edid = edid; 7208 7209 aconnector->dc_em_sink = dc_link_add_remote_sink( 7210 aconnector->dc_link, 7211 (uint8_t *)edid, 7212 (edid->extensions + 1) * EDID_LENGTH, 7213 &init_params); 7214 7215 if (aconnector->base.force == DRM_FORCE_ON) { 7216 aconnector->dc_sink = aconnector->dc_link->local_sink ? 7217 aconnector->dc_link->local_sink : 7218 aconnector->dc_em_sink; 7219 dc_sink_retain(aconnector->dc_sink); 7220 } 7221 } 7222 7223 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 7224 { 7225 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 7226 7227 /* 7228 * In case of headless boot with force on for a DP managed connector, 7229 * these settings have to be != 0 to get an initial modeset 7230 */ 7231 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 7232 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 7233 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 7234 } 7235 7236 7237 aconnector->base.override_edid = true; 7238 create_eml_sink(aconnector); 7239 } 7240 7241 struct dc_stream_state * 7242 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, 7243 const struct drm_display_mode *drm_mode, 7244 const struct dm_connector_state *dm_state, 7245 const struct dc_stream_state *old_stream) 7246 { 7247 struct drm_connector *connector = &aconnector->base; 7248 struct amdgpu_device *adev = drm_to_adev(connector->dev); 7249 struct dc_stream_state *stream; 7250 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 7251 int requested_bpc = drm_state ?
drm_state->max_requested_bpc : 8; 7252 enum dc_status dc_result = DC_OK; 7253 7254 do { 7255 stream = create_stream_for_sink(aconnector, drm_mode, 7256 dm_state, old_stream, 7257 requested_bpc); 7258 if (stream == NULL) { 7259 DRM_ERROR("Failed to create stream for sink!\n"); 7260 break; 7261 } 7262 7263 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 7264 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream); 7265 7266 if (dc_result == DC_OK) 7267 dc_result = dc_validate_stream(adev->dm.dc, stream); 7268 7269 if (dc_result != DC_OK) { 7270 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", 7271 drm_mode->hdisplay, 7272 drm_mode->vdisplay, 7273 drm_mode->clock, 7274 dc_result, 7275 dc_status_to_str(dc_result)); 7276 7277 dc_stream_release(stream); 7278 stream = NULL; 7279 requested_bpc -= 2; /* lower bpc to retry validation */ 7280 } 7281 7282 } while (stream == NULL && requested_bpc >= 6); 7283 7284 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { 7285 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); 7286 7287 aconnector->force_yuv420_output = true; 7288 stream = create_validate_stream_for_sink(aconnector, drm_mode, 7289 dm_state, old_stream); 7290 aconnector->force_yuv420_output = false; 7291 } 7292 7293 return stream; 7294 } 7295 7296 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 7297 struct drm_display_mode *mode) 7298 { 7299 int result = MODE_ERROR; 7300 struct dc_sink *dc_sink; 7301 /* TODO: Unhardcode stream count */ 7302 struct dc_stream_state *stream; 7303 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7304 7305 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 7306 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 7307 return result; 7308 7309 /* 7310 * Only run this the first time mode_valid is called to initialize 7311 * EDID mgmt 7312 */ 7313 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 7314 !aconnector->dc_em_sink) 7315 handle_edid_mgmt(aconnector); 7316 7317 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 7318 7319 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 7320 aconnector->base.force != DRM_FORCE_ON) { 7321 DRM_ERROR("dc_sink is NULL!\n"); 7322 goto fail; 7323 } 7324 7325 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); 7326 if (stream) { 7327 dc_stream_release(stream); 7328 result = MODE_OK; 7329 } 7330 7331 fail: 7332 /* TODO: error handling */ 7333 return result; 7334 } 7335 7336 static int fill_hdr_info_packet(const struct drm_connector_state *state, 7337 struct dc_info_packet *out) 7338 { 7339 struct hdmi_drm_infoframe frame; 7340 unsigned char buf[30]; /* 26 + 4 */ 7341 ssize_t len; 7342 int ret, i; 7343 7344 memset(out, 0, sizeof(*out)); 7345 7346 if (!state->hdr_output_metadata) 7347 return 0; 7348 7349 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 7350 if (ret) 7351 return ret; 7352 7353 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 7354 if (len < 0) 7355 return (int)len; 7356 7357 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 7358 if (len != 30) 7359 return -EINVAL; 7360 7361 /* Prepare the infopacket for DC.
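 For HDMI sinks this is the Dynamic Range and Mastering infoframe (type 0x87, 26 payload bytes); for DP/eDP the same payload is carried in an SDP, which is why the header bytes and the starting index into sb[] differ in the cases below.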
*/ 7362 switch (state->connector->connector_type) { 7363 case DRM_MODE_CONNECTOR_HDMIA: 7364 out->hb0 = 0x87; /* type */ 7365 out->hb1 = 0x01; /* version */ 7366 out->hb2 = 0x1A; /* length */ 7367 out->sb[0] = buf[3]; /* checksum */ 7368 i = 1; 7369 break; 7370 7371 case DRM_MODE_CONNECTOR_DisplayPort: 7372 case DRM_MODE_CONNECTOR_eDP: 7373 out->hb0 = 0x00; /* sdp id, zero */ 7374 out->hb1 = 0x87; /* type */ 7375 out->hb2 = 0x1D; /* payload len - 1 */ 7376 out->hb3 = (0x13 << 2); /* sdp version */ 7377 out->sb[0] = 0x01; /* version */ 7378 out->sb[1] = 0x1A; /* length */ 7379 i = 2; 7380 break; 7381 7382 default: 7383 return -EINVAL; 7384 } 7385 7386 memcpy(&out->sb[i], &buf[4], 26); 7387 out->valid = true; 7388 7389 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 7390 sizeof(out->sb), false); 7391 7392 return 0; 7393 } 7394 7395 static int 7396 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 7397 struct drm_atomic_state *state) 7398 { 7399 struct drm_connector_state *new_con_state = 7400 drm_atomic_get_new_connector_state(state, conn); 7401 struct drm_connector_state *old_con_state = 7402 drm_atomic_get_old_connector_state(state, conn); 7403 struct drm_crtc *crtc = new_con_state->crtc; 7404 struct drm_crtc_state *new_crtc_state; 7405 int ret; 7406 7407 trace_amdgpu_dm_connector_atomic_check(new_con_state); 7408 7409 if (!crtc) 7410 return 0; 7411 7412 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 7413 struct dc_info_packet hdr_infopacket; 7414 7415 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 7416 if (ret) 7417 return ret; 7418 7419 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 7420 if (IS_ERR(new_crtc_state)) 7421 return PTR_ERR(new_crtc_state); 7422 7423 /* 7424 * DC considers the stream backends changed if the 7425 * static metadata changes. Forcing the modeset also 7426 * gives a simple way for userspace to switch from 7427 * 8bpc to 10bpc when setting the metadata to enter 7428 * or exit HDR. 7429 * 7430 * Changing the static metadata after it's been 7431 * set is permissible, however. So only force a 7432 * modeset if we're entering or exiting HDR. 7433 */ 7434 new_crtc_state->mode_changed = 7435 !old_con_state->hdr_output_metadata || 7436 !new_con_state->hdr_output_metadata; 7437 } 7438 7439 return 0; 7440 } 7441 7442 static const struct drm_connector_helper_funcs 7443 amdgpu_dm_connector_helper_funcs = { 7444 /* 7445 * If hotplugging a second, bigger display in FB Con mode, bigger resolution 7446 * modes will be filtered by drm_mode_validate_size(), and those modes 7447 * are missing after the user starts lightdm. So we need to renew the modes 7448 * list in the get_modes callback, not just return the modes count 7449 */ 7450 .get_modes = get_modes, 7451 .mode_valid = amdgpu_dm_connector_mode_valid, 7452 .atomic_check = amdgpu_dm_connector_atomic_check, 7453 }; 7454 7455 static void dm_crtc_helper_disable(struct drm_crtc *crtc) 7456 { 7457 } 7458 7459 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) 7460 { 7461 struct drm_atomic_state *state = new_crtc_state->state; 7462 struct drm_plane *plane; 7463 int num_active = 0; 7464 7465 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { 7466 struct drm_plane_state *new_plane_state; 7467 7468 /* Cursor planes are "fake".
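 The DM programs the cursor through dc_stream_set_cursor_attributes() and dc_stream_set_cursor_position() rather than as a DC plane, so it is not counted here.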
*/ 7469 if (plane->type == DRM_PLANE_TYPE_CURSOR) 7470 continue; 7471 7472 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 7473 7474 if (!new_plane_state) { 7475 /* 7476 * The plane is enabled on the CRTC and hasn't changed 7477 * state. This means that it previously passed 7478 * validation and is therefore enabled. 7479 */ 7480 num_active += 1; 7481 continue; 7482 } 7483 7484 /* We need a framebuffer to be considered enabled. */ 7485 num_active += (new_plane_state->fb != NULL); 7486 } 7487 7488 return num_active; 7489 } 7490 7491 static void dm_update_crtc_active_planes(struct drm_crtc *crtc, 7492 struct drm_crtc_state *new_crtc_state) 7493 { 7494 struct dm_crtc_state *dm_new_crtc_state = 7495 to_dm_crtc_state(new_crtc_state); 7496 7497 dm_new_crtc_state->active_planes = 0; 7498 7499 if (!dm_new_crtc_state->stream) 7500 return; 7501 7502 dm_new_crtc_state->active_planes = 7503 count_crtc_active_planes(new_crtc_state); 7504 } 7505 7506 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, 7507 struct drm_atomic_state *state) 7508 { 7509 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 7510 crtc); 7511 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 7512 struct dc *dc = adev->dm.dc; 7513 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 7514 int ret = -EINVAL; 7515 7516 trace_amdgpu_dm_crtc_atomic_check(crtc_state); 7517 7518 dm_update_crtc_active_planes(crtc, crtc_state); 7519 7520 if (WARN_ON(unlikely(!dm_crtc_state->stream && 7521 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { 7522 return ret; 7523 } 7524 7525 /* 7526 * We require the primary plane to be enabled whenever the CRTC is, otherwise 7527 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other 7528 * planes are disabled, which is not supported by the hardware. And there is legacy 7529 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7530 */ 7531 if (crtc_state->enable && 7532 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { 7533 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); 7534 return -EINVAL; 7535 } 7536 7537 /* In some use cases, like reset, no stream is attached */ 7538 if (!dm_crtc_state->stream) 7539 return 0; 7540 7541 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) 7542 return 0; 7543 7544 DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); 7545 return ret; 7546 } 7547 7548 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, 7549 const struct drm_display_mode *mode, 7550 struct drm_display_mode *adjusted_mode) 7551 { 7552 return true; 7553 } 7554 7555 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { 7556 .disable = dm_crtc_helper_disable, 7557 .atomic_check = dm_crtc_helper_atomic_check, 7558 .mode_fixup = dm_crtc_helper_mode_fixup, 7559 .get_scanout_position = amdgpu_crtc_get_scanout_position, 7560 }; 7561 7562 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 7563 { 7564 7565 } 7566 7567 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) 7568 { 7569 switch (display_color_depth) { 7570 case COLOR_DEPTH_666: 7571 return 6; 7572 case COLOR_DEPTH_888: 7573 return 8; 7574 case COLOR_DEPTH_101010: 7575 return 10; 7576 case COLOR_DEPTH_121212: 7577 return 12; 7578 case COLOR_DEPTH_141414: 7579 return 14; 7580 case COLOR_DEPTH_161616: 7581 return 16; 7582 default: 7583 break; 7584 } 7585 return 0; 7586 } 7587 7588 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 7589 struct drm_crtc_state *crtc_state, 7590 struct drm_connector_state *conn_state) 7591 { 7592 struct drm_atomic_state *state = crtc_state->state; 7593 struct drm_connector *connector = conn_state->connector; 7594 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7595 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 7596 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 7597 struct drm_dp_mst_topology_mgr *mst_mgr; 7598 struct drm_dp_mst_port *mst_port; 7599 enum dc_color_depth color_depth; 7600 int clock, bpp = 0; 7601 bool is_y420 = false; 7602 7603 if (!aconnector->port || !aconnector->dc_sink) 7604 return 0; 7605 7606 mst_port = aconnector->port; 7607 mst_mgr = &aconnector->mst_port->mst_mgr; 7608 7609 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 7610 return 0; 7611 7612 if (!state->duplicated) { 7613 int max_bpc = conn_state->max_requested_bpc; 7614 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 7615 aconnector->force_yuv420_output; 7616 color_depth = convert_color_depth_from_display_info(connector, 7617 is_y420, 7618 max_bpc); 7619 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 7620 clock = adjusted_mode->clock; 7621 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); 7622 } 7623 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, 7624 mst_mgr, 7625 mst_port, 7626 dm_new_connector_state->pbn, 7627 dm_mst_get_pbn_divider(aconnector->dc_link)); 7628 if (dm_new_connector_state->vcpi_slots < 0) { 7629 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 7630 return dm_new_connector_state->vcpi_slots; 7631 } 7632 return 0; 7633 } 7634 7635 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 7636 .disable = dm_encoder_helper_disable, 7637 .atomic_check = 
dm_encoder_helper_atomic_check 7638 }; 7639 7640 #if defined(CONFIG_DRM_AMD_DC_DCN) 7641 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7642 struct dc_state *dc_state, 7643 struct dsc_mst_fairness_vars *vars) 7644 { 7645 struct dc_stream_state *stream = NULL; 7646 struct drm_connector *connector; 7647 struct drm_connector_state *new_con_state; 7648 struct amdgpu_dm_connector *aconnector; 7649 struct dm_connector_state *dm_conn_state; 7650 int i, j; 7651 int vcpi, pbn_div, pbn, slot_num = 0; 7652 7653 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7654 7655 aconnector = to_amdgpu_dm_connector(connector); 7656 7657 if (!aconnector->port) 7658 continue; 7659 7660 if (!new_con_state || !new_con_state->crtc) 7661 continue; 7662 7663 dm_conn_state = to_dm_connector_state(new_con_state); 7664 7665 for (j = 0; j < dc_state->stream_count; j++) { 7666 stream = dc_state->streams[j]; 7667 if (!stream) 7668 continue; 7669 7670 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector) 7671 break; 7672 7673 stream = NULL; 7674 } 7675 7676 if (!stream) 7677 continue; 7678 7679 pbn_div = dm_mst_get_pbn_divider(stream->link); 7680 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7681 for (j = 0; j < dc_state->stream_count; j++) { 7682 if (vars[j].aconnector == aconnector) { 7683 pbn = vars[j].pbn; 7684 break; 7685 } 7686 } 7687 7688 if (j == dc_state->stream_count) 7689 continue; 7690 7691 slot_num = DIV_ROUND_UP(pbn, pbn_div); 7692 7693 if (stream->timing.flags.DSC != 1) { 7694 dm_conn_state->pbn = pbn; 7695 dm_conn_state->vcpi_slots = slot_num; 7696 7697 drm_dp_mst_atomic_enable_dsc(state, 7698 aconnector->port, 7699 dm_conn_state->pbn, 7700 0, 7701 false); 7702 continue; 7703 } 7704 7705 vcpi = drm_dp_mst_atomic_enable_dsc(state, 7706 aconnector->port, 7707 pbn, pbn_div, 7708 true); 7709 if (vcpi < 0) 7710 return vcpi; 7711 7712 dm_conn_state->pbn = pbn; 7713 dm_conn_state->vcpi_slots = vcpi; 7714 } 7715 return 0; 7716 } 7717 #endif 7718 7719 static void dm_drm_plane_reset(struct drm_plane *plane) 7720 { 7721 struct dm_plane_state *amdgpu_state = NULL; 7722 7723 if (plane->state) 7724 plane->funcs->atomic_destroy_state(plane, plane->state); 7725 7726 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 7727 WARN_ON(amdgpu_state == NULL); 7728 7729 if (amdgpu_state) 7730 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 7731 } 7732 7733 static struct drm_plane_state * 7734 dm_drm_plane_duplicate_state(struct drm_plane *plane) 7735 { 7736 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 7737 7738 old_dm_plane_state = to_dm_plane_state(plane->state); 7739 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 7740 if (!dm_plane_state) 7741 return NULL; 7742 7743 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 7744 7745 if (old_dm_plane_state->dc_state) { 7746 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 7747 dc_plane_state_retain(dm_plane_state->dc_state); 7748 } 7749 7750 return &dm_plane_state->base; 7751 } 7752 7753 static void dm_drm_plane_destroy_state(struct drm_plane *plane, 7754 struct drm_plane_state *state) 7755 { 7756 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 7757 7758 if (dm_plane_state->dc_state) 7759 dc_plane_state_release(dm_plane_state->dc_state); 7760 7761 drm_atomic_helper_plane_destroy_state(plane, state); 7762 } 7763 7764 static const struct drm_plane_funcs dm_plane_funcs = { 7765 .update_plane = 
drm_atomic_helper_update_plane, 7766 .disable_plane = drm_atomic_helper_disable_plane, 7767 .destroy = drm_primary_helper_destroy, 7768 .reset = dm_drm_plane_reset, 7769 .atomic_duplicate_state = dm_drm_plane_duplicate_state, 7770 .atomic_destroy_state = dm_drm_plane_destroy_state, 7771 .format_mod_supported = dm_plane_format_mod_supported, 7772 }; 7773 7774 static int dm_plane_helper_prepare_fb(struct drm_plane *plane, 7775 struct drm_plane_state *new_state) 7776 { 7777 struct amdgpu_framebuffer *afb; 7778 struct drm_gem_object *obj; 7779 struct amdgpu_device *adev; 7780 struct amdgpu_bo *rbo; 7781 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 7782 uint32_t domain; 7783 int r; 7784 7785 if (!new_state->fb) { 7786 DRM_DEBUG_KMS("No FB bound\n"); 7787 return 0; 7788 } 7789 7790 afb = to_amdgpu_framebuffer(new_state->fb); 7791 obj = new_state->fb->obj[0]; 7792 rbo = gem_to_amdgpu_bo(obj); 7793 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 7794 7795 r = amdgpu_bo_reserve(rbo, true); 7796 if (r) { 7797 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 7798 return r; 7799 } 7800 7801 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 7802 if (r) { 7803 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); 7804 goto error_unlock; 7805 } 7806 7807 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7808 domain = amdgpu_display_supported_domains(adev, rbo->flags); 7809 else 7810 domain = AMDGPU_GEM_DOMAIN_VRAM; 7811 7812 r = amdgpu_bo_pin(rbo, domain); 7813 if (unlikely(r != 0)) { 7814 if (r != -ERESTARTSYS) 7815 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 7816 goto error_unlock; 7817 } 7818 7819 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 7820 if (unlikely(r != 0)) { 7821 DRM_ERROR("%p bind failed\n", rbo); 7822 goto error_unpin; 7823 } 7824 7825 amdgpu_bo_unreserve(rbo); 7826 7827 afb->address = amdgpu_bo_gpu_offset(rbo); 7828 7829 amdgpu_bo_ref(rbo); 7830 7831 /** 7832 * We don't do surface updates on planes that have been newly created, 7833 * but we also don't have the afb->address during atomic check. 7834 * 7835 * Fill in buffer attributes depending on the address here, but only on 7836 * newly created planes since they're not being used by DC yet and this 7837 * won't modify global state. 
7838 */ 7839 dm_plane_state_old = to_dm_plane_state(plane->state); 7840 dm_plane_state_new = to_dm_plane_state(new_state); 7841 7842 if (dm_plane_state_new->dc_state && 7843 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 7844 struct dc_plane_state *plane_state = 7845 dm_plane_state_new->dc_state; 7846 bool force_disable_dcc = !plane_state->dcc.enable; 7847 7848 fill_plane_buffer_attributes( 7849 adev, afb, plane_state->format, plane_state->rotation, 7850 afb->tiling_flags, 7851 &plane_state->tiling_info, &plane_state->plane_size, 7852 &plane_state->dcc, &plane_state->address, 7853 afb->tmz_surface, force_disable_dcc); 7854 } 7855 7856 return 0; 7857 7858 error_unpin: 7859 amdgpu_bo_unpin(rbo); 7860 7861 error_unlock: 7862 amdgpu_bo_unreserve(rbo); 7863 return r; 7864 } 7865 7866 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, 7867 struct drm_plane_state *old_state) 7868 { 7869 struct amdgpu_bo *rbo; 7870 int r; 7871 7872 if (!old_state->fb) 7873 return; 7874 7875 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 7876 r = amdgpu_bo_reserve(rbo, false); 7877 if (unlikely(r)) { 7878 DRM_ERROR("failed to reserve rbo before unpin\n"); 7879 return; 7880 } 7881 7882 amdgpu_bo_unpin(rbo); 7883 amdgpu_bo_unreserve(rbo); 7884 amdgpu_bo_unref(&rbo); 7885 } 7886 7887 static int dm_plane_helper_check_state(struct drm_plane_state *state, 7888 struct drm_crtc_state *new_crtc_state) 7889 { 7890 struct drm_framebuffer *fb = state->fb; 7891 int min_downscale, max_upscale; 7892 int min_scale = 0; 7893 int max_scale = INT_MAX; 7894 7895 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 7896 if (fb && state->crtc) { 7897 /* Validate viewport to cover the case when only the position changes */ 7898 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 7899 int viewport_width = state->crtc_w; 7900 int viewport_height = state->crtc_h; 7901 7902 if (state->crtc_x < 0) 7903 viewport_width += state->crtc_x; 7904 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 7905 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 7906 7907 if (state->crtc_y < 0) 7908 viewport_height += state->crtc_y; 7909 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 7910 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 7911 7912 if (viewport_width < 0 || viewport_height < 0) { 7913 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 7914 return -EINVAL; 7915 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 7916 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 7917 return -EINVAL; 7918 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 7919 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 7920 return -EINVAL; 7921 } 7922 7923 } 7924 7925 /* Get min/max allowed scaling factors from plane caps. */ 7926 get_min_max_dc_plane_scaling(state->crtc->dev, fb, 7927 &min_downscale, &max_upscale); 7928 /* 7929 * Convert to drm convention: 16.16 fixed point, instead of dc's 7930 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 7931 * dst/src, so min_scale = 1.0 / max_upscale, etc. 
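 * For example, a max_upscale cap of 16000 (16x) gives min_scale = (1000 << 16) / 16000 = 0x1000, i.e. 1/16 in 16.16 fixed point.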
7932 */ 7933 min_scale = (1000 << 16) / max_upscale; 7934 max_scale = (1000 << 16) / min_downscale; 7935 } 7936 7937 return drm_atomic_helper_check_plane_state( 7938 state, new_crtc_state, min_scale, max_scale, true, true); 7939 } 7940 7941 static int dm_plane_atomic_check(struct drm_plane *plane, 7942 struct drm_atomic_state *state) 7943 { 7944 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 7945 plane); 7946 struct amdgpu_device *adev = drm_to_adev(plane->dev); 7947 struct dc *dc = adev->dm.dc; 7948 struct dm_plane_state *dm_plane_state; 7949 struct dc_scaling_info scaling_info; 7950 struct drm_crtc_state *new_crtc_state; 7951 int ret; 7952 7953 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 7954 7955 dm_plane_state = to_dm_plane_state(new_plane_state); 7956 7957 if (!dm_plane_state->dc_state) 7958 return 0; 7959 7960 new_crtc_state = 7961 drm_atomic_get_new_crtc_state(state, 7962 new_plane_state->crtc); 7963 if (!new_crtc_state) 7964 return -EINVAL; 7965 7966 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 7967 if (ret) 7968 return ret; 7969 7970 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info); 7971 if (ret) 7972 return ret; 7973 7974 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 7975 return 0; 7976 7977 return -EINVAL; 7978 } 7979 7980 static int dm_plane_atomic_async_check(struct drm_plane *plane, 7981 struct drm_atomic_state *state) 7982 { 7983 /* Only support async updates on cursor planes. */ 7984 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7985 return -EINVAL; 7986 7987 return 0; 7988 } 7989 7990 static void dm_plane_atomic_async_update(struct drm_plane *plane, 7991 struct drm_atomic_state *state) 7992 { 7993 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 7994 plane); 7995 struct drm_plane_state *old_state = 7996 drm_atomic_get_old_plane_state(state, plane); 7997 7998 trace_amdgpu_dm_atomic_update_cursor(new_state); 7999 8000 swap(plane->state->fb, new_state->fb); 8001 8002 plane->state->src_x = new_state->src_x; 8003 plane->state->src_y = new_state->src_y; 8004 plane->state->src_w = new_state->src_w; 8005 plane->state->src_h = new_state->src_h; 8006 plane->state->crtc_x = new_state->crtc_x; 8007 plane->state->crtc_y = new_state->crtc_y; 8008 plane->state->crtc_w = new_state->crtc_w; 8009 plane->state->crtc_h = new_state->crtc_h; 8010 8011 handle_cursor_update(plane, old_state); 8012 } 8013 8014 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 8015 .prepare_fb = dm_plane_helper_prepare_fb, 8016 .cleanup_fb = dm_plane_helper_cleanup_fb, 8017 .atomic_check = dm_plane_atomic_check, 8018 .atomic_async_check = dm_plane_atomic_async_check, 8019 .atomic_async_update = dm_plane_atomic_async_update 8020 }; 8021 8022 /* 8023 * TODO: these are currently initialized to rgb formats only. 
8024 * For future use cases we should either initialize them dynamically based on 8025 * plane capabilities, or initialize this array to all formats, so internal drm 8026 * check will succeed, and let DC implement proper check 8027 */ 8028 static const uint32_t rgb_formats[] = { 8029 DRM_FORMAT_XRGB8888, 8030 DRM_FORMAT_ARGB8888, 8031 DRM_FORMAT_RGBA8888, 8032 DRM_FORMAT_XRGB2101010, 8033 DRM_FORMAT_XBGR2101010, 8034 DRM_FORMAT_ARGB2101010, 8035 DRM_FORMAT_ABGR2101010, 8036 DRM_FORMAT_XRGB16161616, 8037 DRM_FORMAT_XBGR16161616, 8038 DRM_FORMAT_ARGB16161616, 8039 DRM_FORMAT_ABGR16161616, 8040 DRM_FORMAT_XBGR8888, 8041 DRM_FORMAT_ABGR8888, 8042 DRM_FORMAT_RGB565, 8043 }; 8044 8045 static const uint32_t overlay_formats[] = { 8046 DRM_FORMAT_XRGB8888, 8047 DRM_FORMAT_ARGB8888, 8048 DRM_FORMAT_RGBA8888, 8049 DRM_FORMAT_XBGR8888, 8050 DRM_FORMAT_ABGR8888, 8051 DRM_FORMAT_RGB565 8052 }; 8053 8054 static const u32 cursor_formats[] = { 8055 DRM_FORMAT_ARGB8888 8056 }; 8057 8058 static int get_plane_formats(const struct drm_plane *plane, 8059 const struct dc_plane_cap *plane_cap, 8060 uint32_t *formats, int max_formats) 8061 { 8062 int i, num_formats = 0; 8063 8064 /* 8065 * TODO: Query support for each group of formats directly from 8066 * DC plane caps. This will require adding more formats to the 8067 * caps list. 8068 */ 8069 8070 switch (plane->type) { 8071 case DRM_PLANE_TYPE_PRIMARY: 8072 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 8073 if (num_formats >= max_formats) 8074 break; 8075 8076 formats[num_formats++] = rgb_formats[i]; 8077 } 8078 8079 if (plane_cap && plane_cap->pixel_format_support.nv12) 8080 formats[num_formats++] = DRM_FORMAT_NV12; 8081 if (plane_cap && plane_cap->pixel_format_support.p010) 8082 formats[num_formats++] = DRM_FORMAT_P010; 8083 if (plane_cap && plane_cap->pixel_format_support.fp16) { 8084 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 8085 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 8086 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 8087 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 8088 } 8089 break; 8090 8091 case DRM_PLANE_TYPE_OVERLAY: 8092 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 8093 if (num_formats >= max_formats) 8094 break; 8095 8096 formats[num_formats++] = overlay_formats[i]; 8097 } 8098 break; 8099 8100 case DRM_PLANE_TYPE_CURSOR: 8101 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 8102 if (num_formats >= max_formats) 8103 break; 8104 8105 formats[num_formats++] = cursor_formats[i]; 8106 } 8107 break; 8108 } 8109 8110 return num_formats; 8111 } 8112 8113 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 8114 struct drm_plane *plane, 8115 unsigned long possible_crtcs, 8116 const struct dc_plane_cap *plane_cap) 8117 { 8118 uint32_t formats[32]; 8119 int num_formats; 8120 int res = -EPERM; 8121 unsigned int supported_rotations; 8122 uint64_t *modifiers = NULL; 8123 8124 num_formats = get_plane_formats(plane, plane_cap, formats, 8125 ARRAY_SIZE(formats)); 8126 8127 res = get_plane_modifiers(dm->adev, plane->type, &modifiers); 8128 if (res) 8129 return res; 8130 8131 if (modifiers == NULL) 8132 adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; 8133 8134 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 8135 &dm_plane_funcs, formats, num_formats, 8136 modifiers, plane->type, NULL); 8137 kfree(modifiers); 8138 if (res) 8139 return res; 8140 8141 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 8142 plane_cap && plane_cap->per_pixel_alpha) { 8143 unsigned int 
blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 8144 BIT(DRM_MODE_BLEND_PREMULTI) | 8145 BIT(DRM_MODE_BLEND_COVERAGE); 8146 8147 drm_plane_create_alpha_property(plane); 8148 drm_plane_create_blend_mode_property(plane, blend_caps); 8149 } 8150 8151 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 8152 plane_cap && 8153 (plane_cap->pixel_format_support.nv12 || 8154 plane_cap->pixel_format_support.p010)) { 8155 /* This only affects YUV formats. */ 8156 drm_plane_create_color_properties( 8157 plane, 8158 BIT(DRM_COLOR_YCBCR_BT601) | 8159 BIT(DRM_COLOR_YCBCR_BT709) | 8160 BIT(DRM_COLOR_YCBCR_BT2020), 8161 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 8162 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 8163 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 8164 } 8165 8166 supported_rotations = 8167 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 8168 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 8169 8170 if (dm->adev->asic_type >= CHIP_BONAIRE && 8171 plane->type != DRM_PLANE_TYPE_CURSOR) 8172 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 8173 supported_rotations); 8174 8175 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 8176 8177 /* Create (reset) the plane state */ 8178 if (plane->funcs->reset) 8179 plane->funcs->reset(plane); 8180 8181 return 0; 8182 } 8183 8184 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 8185 struct drm_plane *plane, 8186 uint32_t crtc_index) 8187 { 8188 struct amdgpu_crtc *acrtc = NULL; 8189 struct drm_plane *cursor_plane; 8190 8191 int res = -ENOMEM; 8192 8193 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); 8194 if (!cursor_plane) 8195 goto fail; 8196 8197 cursor_plane->type = DRM_PLANE_TYPE_CURSOR; 8198 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); 8199 8200 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); 8201 if (!acrtc) 8202 goto fail; 8203 8204 res = drm_crtc_init_with_planes( 8205 dm->ddev, 8206 &acrtc->base, 8207 plane, 8208 cursor_plane, 8209 &amdgpu_dm_crtc_funcs, NULL); 8210 8211 if (res) 8212 goto fail; 8213 8214 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); 8215 8216 /* Create (reset) the plane state */ 8217 if (acrtc->base.funcs->reset) 8218 acrtc->base.funcs->reset(&acrtc->base); 8219 8220 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; 8221 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; 8222 8223 acrtc->crtc_id = crtc_index; 8224 acrtc->base.enabled = false; 8225 acrtc->otg_inst = -1; 8226 8227 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 8228 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, 8229 true, MAX_COLOR_LUT_ENTRIES); 8230 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); 8231 8232 return 0; 8233 8234 fail: 8235 kfree(acrtc); 8236 kfree(cursor_plane); 8237 return res; 8238 } 8239 8240 8241 static int to_drm_connector_type(enum signal_type st) 8242 { 8243 switch (st) { 8244 case SIGNAL_TYPE_HDMI_TYPE_A: 8245 return DRM_MODE_CONNECTOR_HDMIA; 8246 case SIGNAL_TYPE_EDP: 8247 return DRM_MODE_CONNECTOR_eDP; 8248 case SIGNAL_TYPE_LVDS: 8249 return DRM_MODE_CONNECTOR_LVDS; 8250 case SIGNAL_TYPE_RGB: 8251 return DRM_MODE_CONNECTOR_VGA; 8252 case SIGNAL_TYPE_DISPLAY_PORT: 8253 case SIGNAL_TYPE_DISPLAY_PORT_MST: 8254 return DRM_MODE_CONNECTOR_DisplayPort; 8255 case SIGNAL_TYPE_DVI_DUAL_LINK: 8256 case SIGNAL_TYPE_DVI_SINGLE_LINK: 8257 return DRM_MODE_CONNECTOR_DVID; 8258 case SIGNAL_TYPE_VIRTUAL: 8259 return DRM_MODE_CONNECTOR_VIRTUAL; 8260 8261 default: 8262 return DRM_MODE_CONNECTOR_Unknown; 8263 } 8264 } 8265 8266 static 
struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 8267 { 8268 struct drm_encoder *encoder; 8269 8270 /* There is only one encoder per connector */ 8271 drm_connector_for_each_possible_encoder(connector, encoder) 8272 return encoder; 8273 8274 return NULL; 8275 } 8276 8277 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 8278 { 8279 struct drm_encoder *encoder; 8280 struct amdgpu_encoder *amdgpu_encoder; 8281 8282 encoder = amdgpu_dm_connector_to_encoder(connector); 8283 8284 if (encoder == NULL) 8285 return; 8286 8287 amdgpu_encoder = to_amdgpu_encoder(encoder); 8288 8289 amdgpu_encoder->native_mode.clock = 0; 8290 8291 if (!list_empty(&connector->probed_modes)) { 8292 struct drm_display_mode *preferred_mode = NULL; 8293 8294 list_for_each_entry(preferred_mode, 8295 &connector->probed_modes, 8296 head) { 8297 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 8298 amdgpu_encoder->native_mode = *preferred_mode; 8299 8300 break; 8301 } 8302 8303 } 8304 } 8305 8306 static struct drm_display_mode * 8307 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 8308 char *name, 8309 int hdisplay, int vdisplay) 8310 { 8311 struct drm_device *dev = encoder->dev; 8312 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8313 struct drm_display_mode *mode = NULL; 8314 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8315 8316 mode = drm_mode_duplicate(dev, native_mode); 8317 8318 if (mode == NULL) 8319 return NULL; 8320 8321 mode->hdisplay = hdisplay; 8322 mode->vdisplay = vdisplay; 8323 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8324 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 8325 8326 return mode; 8327 8328 } 8329 8330 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 8331 struct drm_connector *connector) 8332 { 8333 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 8334 struct drm_display_mode *mode = NULL; 8335 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 8336 struct amdgpu_dm_connector *amdgpu_dm_connector = 8337 to_amdgpu_dm_connector(connector); 8338 int i; 8339 int n; 8340 struct mode_size { 8341 char name[DRM_DISPLAY_MODE_LEN]; 8342 int w; 8343 int h; 8344 } common_modes[] = { 8345 { "640x480", 640, 480}, 8346 { "800x600", 800, 600}, 8347 { "1024x768", 1024, 768}, 8348 { "1280x720", 1280, 720}, 8349 { "1280x800", 1280, 800}, 8350 {"1280x1024", 1280, 1024}, 8351 { "1440x900", 1440, 900}, 8352 {"1680x1050", 1680, 1050}, 8353 {"1600x1200", 1600, 1200}, 8354 {"1920x1080", 1920, 1080}, 8355 {"1920x1200", 1920, 1200} 8356 }; 8357 8358 n = ARRAY_SIZE(common_modes); 8359 8360 for (i = 0; i < n; i++) { 8361 struct drm_display_mode *curmode = NULL; 8362 bool mode_existed = false; 8363 8364 if (common_modes[i].w > native_mode->hdisplay || 8365 common_modes[i].h > native_mode->vdisplay || 8366 (common_modes[i].w == native_mode->hdisplay && 8367 common_modes[i].h == native_mode->vdisplay)) 8368 continue; 8369 8370 list_for_each_entry(curmode, &connector->probed_modes, head) { 8371 if (common_modes[i].w == curmode->hdisplay && 8372 common_modes[i].h == curmode->vdisplay) { 8373 mode_existed = true; 8374 break; 8375 } 8376 } 8377 8378 if (mode_existed) 8379 continue; 8380 8381 mode = amdgpu_dm_create_common_mode(encoder, 8382 common_modes[i].name, common_modes[i].w, 8383 common_modes[i].h); 8384 if (!mode) 8385 continue; 8386 8387 drm_mode_probed_add(connector, mode); 8388 amdgpu_dm_connector->num_modes++; 8389 } 8390 } 8391 8392 static void 
amdgpu_set_panel_orientation(struct drm_connector *connector) 8393 { 8394 struct drm_encoder *encoder; 8395 struct amdgpu_encoder *amdgpu_encoder; 8396 const struct drm_display_mode *native_mode; 8397 8398 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 8399 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 8400 return; 8401 8402 encoder = amdgpu_dm_connector_to_encoder(connector); 8403 if (!encoder) 8404 return; 8405 8406 amdgpu_encoder = to_amdgpu_encoder(encoder); 8407 8408 native_mode = &amdgpu_encoder->native_mode; 8409 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 8410 return; 8411 8412 drm_connector_set_panel_orientation_with_quirk(connector, 8413 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 8414 native_mode->hdisplay, 8415 native_mode->vdisplay); 8416 } 8417 8418 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 8419 struct edid *edid) 8420 { 8421 struct amdgpu_dm_connector *amdgpu_dm_connector = 8422 to_amdgpu_dm_connector(connector); 8423 8424 if (edid) { 8425 /* empty probed_modes */ 8426 INIT_LIST_HEAD(&connector->probed_modes); 8427 amdgpu_dm_connector->num_modes = 8428 drm_add_edid_modes(connector, edid); 8429 8430 /* sorting the probed modes before calling function 8431 * amdgpu_dm_get_native_mode() since EDID can have 8432 * more than one preferred mode. The modes that are 8433 * later in the probed mode list could be of higher 8434 * and preferred resolution. For example, 3840x2160 8435 * resolution in base EDID preferred timing and 4096x2160 8436 * preferred resolution in DID extension block later. 8437 */ 8438 drm_mode_sort(&connector->probed_modes); 8439 amdgpu_dm_get_native_mode(connector); 8440 8441 /* Freesync capabilities are reset by calling 8442 * drm_add_edid_modes() and need to be 8443 * restored here. 8444 */ 8445 amdgpu_dm_update_freesync_caps(connector, edid); 8446 8447 amdgpu_set_panel_orientation(connector); 8448 } else { 8449 amdgpu_dm_connector->num_modes = 0; 8450 } 8451 } 8452 8453 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 8454 struct drm_display_mode *mode) 8455 { 8456 struct drm_display_mode *m; 8457 8458 list_for_each_entry (m, &aconnector->base.probed_modes, head) { 8459 if (drm_mode_equal(m, mode)) 8460 return true; 8461 } 8462 8463 return false; 8464 } 8465 8466 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 8467 { 8468 const struct drm_display_mode *m; 8469 struct drm_display_mode *new_mode; 8470 uint i; 8471 uint32_t new_modes_count = 0; 8472 8473 /* Standard FPS values 8474 * 8475 * 23.976 - TV/NTSC 8476 * 24 - Cinema 8477 * 25 - TV/PAL 8478 * 29.97 - TV/NTSC 8479 * 30 - TV/NTSC 8480 * 48 - Cinema HFR 8481 * 50 - TV/PAL 8482 * 60 - Commonly used 8483 * 48,72,96,120 - Multiples of 24 8484 */ 8485 static const uint32_t common_rates[] = { 8486 23976, 24000, 25000, 29970, 30000, 8487 48000, 50000, 60000, 72000, 96000, 120000 8488 }; 8489 8490 /* 8491 * Find mode with highest refresh rate with the same resolution 8492 * as the preferred mode. Some monitors report a preferred mode 8493 * with lower resolution than the highest refresh rate supported. 
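 * The loop below then derives a fixed-refresh variant of that mode for each common rate by stretching only the vertical blanking: target_vtotal = (clock * 1000 * 1000) / (rate * htotal), with the pixel clock in kHz and the rate in mHz.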
8494 */ 8495 8496 m = get_highest_refresh_rate_mode(aconnector, true); 8497 if (!m) 8498 return 0; 8499 8500 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 8501 uint64_t target_vtotal, target_vtotal_diff; 8502 uint64_t num, den; 8503 8504 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 8505 continue; 8506 8507 if (common_rates[i] < aconnector->min_vfreq * 1000 || 8508 common_rates[i] > aconnector->max_vfreq * 1000) 8509 continue; 8510 8511 num = (unsigned long long)m->clock * 1000 * 1000; 8512 den = common_rates[i] * (unsigned long long)m->htotal; 8513 target_vtotal = div_u64(num, den); 8514 target_vtotal_diff = target_vtotal - m->vtotal; 8515 8516 /* Check for illegal modes */ 8517 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 8518 m->vsync_end + target_vtotal_diff < m->vsync_start || 8519 m->vtotal + target_vtotal_diff < m->vsync_end) 8520 continue; 8521 8522 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8523 if (!new_mode) 8524 goto out; 8525 8526 new_mode->vtotal += (u16)target_vtotal_diff; 8527 new_mode->vsync_start += (u16)target_vtotal_diff; 8528 new_mode->vsync_end += (u16)target_vtotal_diff; 8529 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8530 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8531 8532 if (!is_duplicate_mode(aconnector, new_mode)) { 8533 drm_mode_probed_add(&aconnector->base, new_mode); 8534 new_modes_count += 1; 8535 } else 8536 drm_mode_destroy(aconnector->base.dev, new_mode); 8537 } 8538 out: 8539 return new_modes_count; 8540 } 8541 8542 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8543 struct edid *edid) 8544 { 8545 struct amdgpu_dm_connector *amdgpu_dm_connector = 8546 to_amdgpu_dm_connector(connector); 8547 8548 if (!edid) 8549 return; 8550 8551 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8552 amdgpu_dm_connector->num_modes += 8553 add_fs_modes(amdgpu_dm_connector); 8554 } 8555 8556 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8557 { 8558 struct amdgpu_dm_connector *amdgpu_dm_connector = 8559 to_amdgpu_dm_connector(connector); 8560 struct drm_encoder *encoder; 8561 struct edid *edid = amdgpu_dm_connector->edid; 8562 8563 encoder = amdgpu_dm_connector_to_encoder(connector); 8564 8565 if (!drm_edid_is_valid(edid)) { 8566 amdgpu_dm_connector->num_modes = 8567 drm_add_modes_noedid(connector, 640, 480); 8568 } else { 8569 amdgpu_dm_connector_ddc_get_modes(connector, edid); 8570 amdgpu_dm_connector_add_common_modes(encoder, connector); 8571 amdgpu_dm_connector_add_freesync_modes(connector, edid); 8572 } 8573 amdgpu_dm_fbc_init(connector); 8574 8575 return amdgpu_dm_connector->num_modes; 8576 } 8577 8578 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8579 struct amdgpu_dm_connector *aconnector, 8580 int connector_type, 8581 struct dc_link *link, 8582 int link_index) 8583 { 8584 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 8585 8586 /* 8587 * Some of the properties below require access to state, like bpc. 8588 * Allocate some default initial connector state with our reset helper. 
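 * The property defaults attached below (scaling mode, underscan, max bpc and, for eDP, the ABM level) are stored in that initial state.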
8589 */ 8590 if (aconnector->base.funcs->reset) 8591 aconnector->base.funcs->reset(&aconnector->base); 8592 8593 aconnector->connector_id = link_index; 8594 aconnector->dc_link = link; 8595 aconnector->base.interlace_allowed = false; 8596 aconnector->base.doublescan_allowed = false; 8597 aconnector->base.stereo_allowed = false; 8598 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 8599 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 8600 aconnector->audio_inst = -1; 8601 mutex_init(&aconnector->hpd_lock); 8602 8603 /* 8604 * Configure HPD hot plug support: connector->polled defaults to 0, 8605 * which means HPD hot plug is not supported 8606 */ 8607 switch (connector_type) { 8608 case DRM_MODE_CONNECTOR_HDMIA: 8609 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8610 aconnector->base.ycbcr_420_allowed = 8611 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 8612 break; 8613 case DRM_MODE_CONNECTOR_DisplayPort: 8614 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8615 link->link_enc = link_enc_cfg_get_link_enc(link); 8616 ASSERT(link->link_enc); 8617 if (link->link_enc) 8618 aconnector->base.ycbcr_420_allowed = 8619 link->link_enc->features.dp_ycbcr420_supported ? true : false; 8620 break; 8621 case DRM_MODE_CONNECTOR_DVID: 8622 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8623 break; 8624 default: 8625 break; 8626 } 8627 8628 drm_object_attach_property(&aconnector->base.base, 8629 dm->ddev->mode_config.scaling_mode_property, 8630 DRM_MODE_SCALE_NONE); 8631 8632 drm_object_attach_property(&aconnector->base.base, 8633 adev->mode_info.underscan_property, 8634 UNDERSCAN_OFF); 8635 drm_object_attach_property(&aconnector->base.base, 8636 adev->mode_info.underscan_hborder_property, 8637 0); 8638 drm_object_attach_property(&aconnector->base.base, 8639 adev->mode_info.underscan_vborder_property, 8640 0); 8641 8642 if (!aconnector->mst_port) 8643 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 8644 8645 /* This defaults to the max in the range, but we want 8bpc for non-edp. */ 8646 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ?
16 : 8; 8647 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 8648 8649 if (connector_type == DRM_MODE_CONNECTOR_eDP && 8650 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { 8651 drm_object_attach_property(&aconnector->base.base, 8652 adev->mode_info.abm_level_property, 0); 8653 } 8654 8655 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8656 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8657 connector_type == DRM_MODE_CONNECTOR_eDP) { 8658 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8659 8660 if (!aconnector->mst_port) 8661 drm_connector_attach_vrr_capable_property(&aconnector->base); 8662 8663 #ifdef CONFIG_DRM_AMD_DC_HDCP 8664 if (adev->dm.hdcp_workqueue) 8665 drm_connector_attach_content_protection_property(&aconnector->base, true); 8666 #endif 8667 } 8668 } 8669 8670 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8671 struct i2c_msg *msgs, int num) 8672 { 8673 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8674 struct ddc_service *ddc_service = i2c->ddc_service; 8675 struct i2c_command cmd; 8676 int i; 8677 int result = -EIO; 8678 8679 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8680 8681 if (!cmd.payloads) 8682 return result; 8683 8684 cmd.number_of_payloads = num; 8685 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8686 cmd.speed = 100; 8687 8688 for (i = 0; i < num; i++) { 8689 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8690 cmd.payloads[i].address = msgs[i].addr; 8691 cmd.payloads[i].length = msgs[i].len; 8692 cmd.payloads[i].data = msgs[i].buf; 8693 } 8694 8695 if (dc_submit_i2c( 8696 ddc_service->ctx->dc, 8697 ddc_service->ddc_pin->hw_info.ddc_channel, 8698 &cmd)) 8699 result = num; 8700 8701 kfree(cmd.payloads); 8702 return result; 8703 } 8704 8705 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8706 { 8707 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8708 } 8709 8710 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8711 .master_xfer = amdgpu_dm_i2c_xfer, 8712 .functionality = amdgpu_dm_i2c_func, 8713 }; 8714 8715 static struct amdgpu_i2c_adapter * 8716 create_i2c(struct ddc_service *ddc_service, 8717 int link_index, 8718 int *res) 8719 { 8720 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8721 struct amdgpu_i2c_adapter *i2c; 8722 8723 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8724 if (!i2c) 8725 return NULL; 8726 i2c->base.owner = THIS_MODULE; 8727 i2c->base.class = I2C_CLASS_DDC; 8728 i2c->base.dev.parent = &adev->pdev->dev; 8729 i2c->base.algo = &amdgpu_dm_i2c_algo; 8730 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 8731 i2c_set_adapdata(&i2c->base, i2c); 8732 i2c->ddc_service = ddc_service; 8733 if (i2c->ddc_service->ddc_pin) 8734 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; 8735 8736 return i2c; 8737 } 8738 8739 8740 /* 8741 * Note: this function assumes that dc_link_detect() was called for the 8742 * dc_link which will be represented by this aconnector. 
8743 */ 8744 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8745 struct amdgpu_dm_connector *aconnector, 8746 uint32_t link_index, 8747 struct amdgpu_encoder *aencoder) 8748 { 8749 int res = 0; 8750 int connector_type; 8751 struct dc *dc = dm->dc; 8752 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8753 struct amdgpu_i2c_adapter *i2c; 8754 8755 link->priv = aconnector; 8756 8757 DRM_DEBUG_DRIVER("%s()\n", __func__); 8758 8759 i2c = create_i2c(link->ddc, link->link_index, &res); 8760 if (!i2c) { 8761 DRM_ERROR("Failed to create i2c adapter data\n"); 8762 return -ENOMEM; 8763 } 8764 8765 aconnector->i2c = i2c; 8766 res = i2c_add_adapter(&i2c->base); 8767 8768 if (res) { 8769 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 8770 goto out_free; 8771 } 8772 8773 connector_type = to_drm_connector_type(link->connector_signal); 8774 8775 res = drm_connector_init_with_ddc( 8776 dm->ddev, 8777 &aconnector->base, 8778 &amdgpu_dm_connector_funcs, 8779 connector_type, 8780 &i2c->base); 8781 8782 if (res) { 8783 DRM_ERROR("connector_init failed\n"); 8784 aconnector->connector_id = -1; 8785 goto out_free; 8786 } 8787 8788 drm_connector_helper_add( 8789 &aconnector->base, 8790 &amdgpu_dm_connector_helper_funcs); 8791 8792 amdgpu_dm_connector_init_helper( 8793 dm, 8794 aconnector, 8795 connector_type, 8796 link, 8797 link_index); 8798 8799 drm_connector_attach_encoder( 8800 &aconnector->base, &aencoder->base); 8801 8802 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8803 || connector_type == DRM_MODE_CONNECTOR_eDP) 8804 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8805 8806 out_free: 8807 if (res) { 8808 kfree(i2c); 8809 aconnector->i2c = NULL; 8810 } 8811 return res; 8812 } 8813 8814 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8815 { 8816 switch (adev->mode_info.num_crtc) { 8817 case 1: 8818 return 0x1; 8819 case 2: 8820 return 0x3; 8821 case 3: 8822 return 0x7; 8823 case 4: 8824 return 0xf; 8825 case 5: 8826 return 0x1f; 8827 case 6: 8828 default: 8829 return 0x3f; 8830 } 8831 } 8832 8833 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8834 struct amdgpu_encoder *aencoder, 8835 uint32_t link_index) 8836 { 8837 struct amdgpu_device *adev = drm_to_adev(dev); 8838 8839 int res = drm_encoder_init(dev, 8840 &aencoder->base, 8841 &amdgpu_dm_encoder_funcs, 8842 DRM_MODE_ENCODER_TMDS, 8843 NULL); 8844 8845 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8846 8847 if (!res) 8848 aencoder->encoder_id = link_index; 8849 else 8850 aencoder->encoder_id = -1; 8851 8852 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8853 8854 return res; 8855 } 8856 8857 static void manage_dm_interrupts(struct amdgpu_device *adev, 8858 struct amdgpu_crtc *acrtc, 8859 bool enable) 8860 { 8861 /* 8862 * We have no guarantee that the frontend index maps to the same 8863 * backend index - some even map to more than one. 8864 * 8865 * TODO: Use a different interrupt or check DC itself for the mapping. 
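 * For now the pageflip (and, when secure display is enabled, vline0) interrupt is requested or released per CRTC index, and vblank is turned on or off through the DRM vblank helpers below.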
8866 */ 8867 int irq_type = 8868 amdgpu_display_crtc_idx_to_irq_type( 8869 adev, 8870 acrtc->crtc_id); 8871 8872 if (enable) { 8873 drm_crtc_vblank_on(&acrtc->base); 8874 amdgpu_irq_get( 8875 adev, 8876 &adev->pageflip_irq, 8877 irq_type); 8878 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8879 amdgpu_irq_get( 8880 adev, 8881 &adev->vline0_irq, 8882 irq_type); 8883 #endif 8884 } else { 8885 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8886 amdgpu_irq_put( 8887 adev, 8888 &adev->vline0_irq, 8889 irq_type); 8890 #endif 8891 amdgpu_irq_put( 8892 adev, 8893 &adev->pageflip_irq, 8894 irq_type); 8895 drm_crtc_vblank_off(&acrtc->base); 8896 } 8897 } 8898 8899 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 8900 struct amdgpu_crtc *acrtc) 8901 { 8902 int irq_type = 8903 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 8904 8905 /** 8906 * This reads the current state for the IRQ and force reapplies 8907 * the setting to hardware. 8908 */ 8909 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 8910 } 8911 8912 static bool 8913 is_scaling_state_different(const struct dm_connector_state *dm_state, 8914 const struct dm_connector_state *old_dm_state) 8915 { 8916 if (dm_state->scaling != old_dm_state->scaling) 8917 return true; 8918 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 8919 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 8920 return true; 8921 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 8922 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 8923 return true; 8924 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 8925 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 8926 return true; 8927 return false; 8928 } 8929 8930 #ifdef CONFIG_DRM_AMD_DC_HDCP 8931 static bool is_content_protection_different(struct drm_connector_state *state, 8932 const struct drm_connector_state *old_state, 8933 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) 8934 { 8935 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8936 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 8937 8938 /* Handle: Type0/1 change */ 8939 if (old_state->hdcp_content_type != state->hdcp_content_type && 8940 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 8941 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8942 return true; 8943 } 8944 8945 /* CP is being re enabled, ignore this 8946 * 8947 * Handles: ENABLED -> DESIRED 8948 */ 8949 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 8950 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8951 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 8952 return false; 8953 } 8954 8955 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8956 * 8957 * Handles: UNDESIRED -> ENABLED 8958 */ 8959 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8960 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8961 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8962 8963 /* Stream removed and re-enabled 8964 * 8965 * Can sometimes overlap with the HPD case, 8966 * thus set update_hdcp to false to avoid 8967 * setting HDCP multiple times. 
8968 * 8969 * Handles: DESIRED -> DESIRED (Special case) 8970 */ 8971 if (!(old_state->crtc && old_state->crtc->enabled) && 8972 state->crtc && state->crtc->enabled && 8973 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8974 dm_con_state->update_hdcp = false; 8975 return true; 8976 } 8977 8978 /* Hot-plug, headless s3, dpms 8979 * 8980 * Only start HDCP if the display is connected/enabled. 8981 * update_hdcp flag will be set to false until the next 8982 * HPD comes in. 8983 * 8984 * Handles: DESIRED -> DESIRED (Special case) 8985 */ 8986 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8987 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8988 dm_con_state->update_hdcp = false; 8989 return true; 8990 } 8991 8992 /* 8993 * Handles: UNDESIRED -> UNDESIRED 8994 * DESIRED -> DESIRED 8995 * ENABLED -> ENABLED 8996 */ 8997 if (old_state->content_protection == state->content_protection) 8998 return false; 8999 9000 /* 9001 * Handles: UNDESIRED -> DESIRED 9002 * DESIRED -> UNDESIRED 9003 * ENABLED -> UNDESIRED 9004 */ 9005 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) 9006 return true; 9007 9008 /* 9009 * Handles: DESIRED -> ENABLED 9010 */ 9011 return false; 9012 } 9013 9014 #endif 9015 static void remove_stream(struct amdgpu_device *adev, 9016 struct amdgpu_crtc *acrtc, 9017 struct dc_stream_state *stream) 9018 { 9019 /* this is the update mode case */ 9020 9021 acrtc->otg_inst = -1; 9022 acrtc->enabled = false; 9023 } 9024 9025 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 9026 struct dc_cursor_position *position) 9027 { 9028 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 9029 int x, y; 9030 int xorigin = 0, yorigin = 0; 9031 9032 if (!crtc || !plane->state->fb) 9033 return 0; 9034 9035 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 9036 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 9037 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 9038 __func__, 9039 plane->state->crtc_w, 9040 plane->state->crtc_h); 9041 return -EINVAL; 9042 } 9043 9044 x = plane->state->crtc_x; 9045 y = plane->state->crtc_y; 9046 9047 if (x <= -amdgpu_crtc->max_cursor_width || 9048 y <= -amdgpu_crtc->max_cursor_height) 9049 return 0; 9050 9051 if (x < 0) { 9052 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 9053 x = 0; 9054 } 9055 if (y < 0) { 9056 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 9057 y = 0; 9058 } 9059 position->enable = true; 9060 position->translate_by_source = true; 9061 position->x = x; 9062 position->y = y; 9063 position->x_hotspot = xorigin; 9064 position->y_hotspot = yorigin; 9065 9066 return 0; 9067 } 9068 9069 static void handle_cursor_update(struct drm_plane *plane, 9070 struct drm_plane_state *old_plane_state) 9071 { 9072 struct amdgpu_device *adev = drm_to_adev(plane->dev); 9073 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 9074 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 9075 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 9076 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 9077 uint64_t address = afb ? 
afb->address : 0; 9078 struct dc_cursor_position position = {0}; 9079 struct dc_cursor_attributes attributes; 9080 int ret; 9081 9082 if (!plane->state->fb && !old_plane_state->fb) 9083 return; 9084 9085 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", 9086 __func__, 9087 amdgpu_crtc->crtc_id, 9088 plane->state->crtc_w, 9089 plane->state->crtc_h); 9090 9091 ret = get_cursor_position(plane, crtc, &position); 9092 if (ret) 9093 return; 9094 9095 if (!position.enable) { 9096 /* turn off cursor */ 9097 if (crtc_state && crtc_state->stream) { 9098 mutex_lock(&adev->dm.dc_lock); 9099 dc_stream_set_cursor_position(crtc_state->stream, 9100 &position); 9101 mutex_unlock(&adev->dm.dc_lock); 9102 } 9103 return; 9104 } 9105 9106 amdgpu_crtc->cursor_width = plane->state->crtc_w; 9107 amdgpu_crtc->cursor_height = plane->state->crtc_h; 9108 9109 memset(&attributes, 0, sizeof(attributes)); 9110 attributes.address.high_part = upper_32_bits(address); 9111 attributes.address.low_part = lower_32_bits(address); 9112 attributes.width = plane->state->crtc_w; 9113 attributes.height = plane->state->crtc_h; 9114 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 9115 attributes.rotation_angle = 0; 9116 attributes.attribute_flags.value = 0; 9117 9118 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 9119 9120 if (crtc_state->stream) { 9121 mutex_lock(&adev->dm.dc_lock); 9122 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 9123 &attributes)) 9124 DRM_ERROR("DC failed to set cursor attributes\n"); 9125 9126 if (!dc_stream_set_cursor_position(crtc_state->stream, 9127 &position)) 9128 DRM_ERROR("DC failed to set cursor position\n"); 9129 mutex_unlock(&adev->dm.dc_lock); 9130 } 9131 } 9132 9133 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 9134 { 9135 9136 assert_spin_locked(&acrtc->base.dev->event_lock); 9137 WARN_ON(acrtc->event); 9138 9139 acrtc->event = acrtc->base.state->event; 9140 9141 /* Set the flip status */ 9142 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 9143 9144 /* Mark this event as consumed */ 9145 acrtc->base.state->event = NULL; 9146 9147 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 9148 acrtc->crtc_id); 9149 } 9150 9151 static void update_freesync_state_on_stream( 9152 struct amdgpu_display_manager *dm, 9153 struct dm_crtc_state *new_crtc_state, 9154 struct dc_stream_state *new_stream, 9155 struct dc_plane_state *surface, 9156 u32 flip_timestamp_in_us) 9157 { 9158 struct mod_vrr_params vrr_params; 9159 struct dc_info_packet vrr_infopacket = {0}; 9160 struct amdgpu_device *adev = dm->adev; 9161 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 9162 unsigned long flags; 9163 bool pack_sdp_v1_3 = false; 9164 9165 if (!new_stream) 9166 return; 9167 9168 /* 9169 * TODO: Determine why min/max totals and vrefresh can be 0 here. 9170 * For now it's sufficient to just guard against these conditions. 9171 */ 9172 9173 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 9174 return; 9175 9176 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9177 vrr_params = acrtc->dm_irq_params.vrr_params; 9178 9179 if (surface) { 9180 mod_freesync_handle_preflip( 9181 dm->freesync_module, 9182 surface, 9183 new_stream, 9184 flip_timestamp_in_us, 9185 &vrr_params); 9186 9187 if (adev->family < AMDGPU_FAMILY_AI && 9188 amdgpu_dm_vrr_active(new_crtc_state)) { 9189 mod_freesync_handle_v_update(dm->freesync_module, 9190 new_stream, &vrr_params); 9191 9192 /* Need to call this before the frame ends. 
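			 * Otherwise the adjusted vmin/vmax would only take
			 * effect on a following frame.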
*/ 9193 dc_stream_adjust_vmin_vmax(dm->dc, 9194 new_crtc_state->stream, 9195 &vrr_params.adjust); 9196 } 9197 } 9198 9199 mod_freesync_build_vrr_infopacket( 9200 dm->freesync_module, 9201 new_stream, 9202 &vrr_params, 9203 PACKET_TYPE_VRR, 9204 TRANSFER_FUNC_UNKNOWN, 9205 &vrr_infopacket, 9206 pack_sdp_v1_3); 9207 9208 new_crtc_state->freesync_timing_changed |= 9209 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 9210 &vrr_params.adjust, 9211 sizeof(vrr_params.adjust)) != 0); 9212 9213 new_crtc_state->freesync_vrr_info_changed |= 9214 (memcmp(&new_crtc_state->vrr_infopacket, 9215 &vrr_infopacket, 9216 sizeof(vrr_infopacket)) != 0); 9217 9218 acrtc->dm_irq_params.vrr_params = vrr_params; 9219 new_crtc_state->vrr_infopacket = vrr_infopacket; 9220 9221 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust; 9222 new_stream->vrr_infopacket = vrr_infopacket; 9223 9224 if (new_crtc_state->freesync_vrr_info_changed) 9225 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 9226 new_crtc_state->base.crtc->base.id, 9227 (int)new_crtc_state->base.vrr_enabled, 9228 (int)vrr_params.state); 9229 9230 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9231 } 9232 9233 static void update_stream_irq_parameters( 9234 struct amdgpu_display_manager *dm, 9235 struct dm_crtc_state *new_crtc_state) 9236 { 9237 struct dc_stream_state *new_stream = new_crtc_state->stream; 9238 struct mod_vrr_params vrr_params; 9239 struct mod_freesync_config config = new_crtc_state->freesync_config; 9240 struct amdgpu_device *adev = dm->adev; 9241 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 9242 unsigned long flags; 9243 9244 if (!new_stream) 9245 return; 9246 9247 /* 9248 * TODO: Determine why min/max totals and vrefresh can be 0 here. 9249 * For now it's sufficient to just guard against these conditions. 9250 */ 9251 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 9252 return; 9253 9254 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9255 vrr_params = acrtc->dm_irq_params.vrr_params; 9256 9257 if (new_crtc_state->vrr_supported && 9258 config.min_refresh_in_uhz && 9259 config.max_refresh_in_uhz) { 9260 /* 9261 * if freesync compatible mode was set, config.state will be set 9262 * in atomic check 9263 */ 9264 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 9265 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 9266 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 9267 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 9268 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 9269 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 9270 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 9271 } else { 9272 config.state = new_crtc_state->base.vrr_enabled ? 
9273 VRR_STATE_ACTIVE_VARIABLE : 9274 VRR_STATE_INACTIVE; 9275 } 9276 } else { 9277 config.state = VRR_STATE_UNSUPPORTED; 9278 } 9279 9280 mod_freesync_build_vrr_params(dm->freesync_module, 9281 new_stream, 9282 &config, &vrr_params); 9283 9284 new_crtc_state->freesync_timing_changed |= 9285 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 9286 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); 9287 9288 new_crtc_state->freesync_config = config; 9289 /* Copy state for access from DM IRQ handler */ 9290 acrtc->dm_irq_params.freesync_config = config; 9291 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 9292 acrtc->dm_irq_params.vrr_params = vrr_params; 9293 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9294 } 9295 9296 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 9297 struct dm_crtc_state *new_state) 9298 { 9299 bool old_vrr_active = amdgpu_dm_vrr_active(old_state); 9300 bool new_vrr_active = amdgpu_dm_vrr_active(new_state); 9301 9302 if (!old_vrr_active && new_vrr_active) { 9303 /* Transition VRR inactive -> active: 9304 * While VRR is active, we must not disable vblank irq, as a 9305 * reenable after disable would compute bogus vblank/pflip 9306 * timestamps if it likely happened inside display front-porch. 9307 * 9308 * We also need vupdate irq for the actual core vblank handling 9309 * at end of vblank. 9310 */ 9311 dm_set_vupdate_irq(new_state->base.crtc, true); 9312 drm_crtc_vblank_get(new_state->base.crtc); 9313 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 9314 __func__, new_state->base.crtc->base.id); 9315 } else if (old_vrr_active && !new_vrr_active) { 9316 /* Transition VRR active -> inactive: 9317 * Allow vblank irq disable again for fixed refresh rate. 9318 */ 9319 dm_set_vupdate_irq(new_state->base.crtc, false); 9320 drm_crtc_vblank_put(new_state->base.crtc); 9321 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 9322 __func__, new_state->base.crtc->base.id); 9323 } 9324 } 9325 9326 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 9327 { 9328 struct drm_plane *plane; 9329 struct drm_plane_state *old_plane_state; 9330 int i; 9331 9332 /* 9333 * TODO: Make this per-stream so we don't issue redundant updates for 9334 * commits with multiple streams. 
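	 * Today every cursor plane in the atomic state is walked, even for
	 * streams that did not change.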
9335 */ 9336 for_each_old_plane_in_state(state, plane, old_plane_state, i) 9337 if (plane->type == DRM_PLANE_TYPE_CURSOR) 9338 handle_cursor_update(plane, old_plane_state); 9339 } 9340 9341 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 9342 struct dc_state *dc_state, 9343 struct drm_device *dev, 9344 struct amdgpu_display_manager *dm, 9345 struct drm_crtc *pcrtc, 9346 bool wait_for_vblank) 9347 { 9348 uint32_t i; 9349 uint64_t timestamp_ns; 9350 struct drm_plane *plane; 9351 struct drm_plane_state *old_plane_state, *new_plane_state; 9352 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 9353 struct drm_crtc_state *new_pcrtc_state = 9354 drm_atomic_get_new_crtc_state(state, pcrtc); 9355 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 9356 struct dm_crtc_state *dm_old_crtc_state = 9357 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 9358 int planes_count = 0, vpos, hpos; 9359 long r; 9360 unsigned long flags; 9361 struct amdgpu_bo *abo; 9362 uint32_t target_vblank, last_flip_vblank; 9363 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); 9364 bool pflip_present = false; 9365 struct { 9366 struct dc_surface_update surface_updates[MAX_SURFACES]; 9367 struct dc_plane_info plane_infos[MAX_SURFACES]; 9368 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 9369 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 9370 struct dc_stream_update stream_update; 9371 } *bundle; 9372 9373 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 9374 9375 if (!bundle) { 9376 dm_error("Failed to allocate update bundle\n"); 9377 goto cleanup; 9378 } 9379 9380 /* 9381 * Disable the cursor first if we're disabling all the planes. 9382 * It'll remain on the screen after the planes are re-enabled 9383 * if we don't. 
9384 */ 9385 if (acrtc_state->active_planes == 0) 9386 amdgpu_dm_commit_cursors(state); 9387 9388 /* update planes when needed */ 9389 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 9390 struct drm_crtc *crtc = new_plane_state->crtc; 9391 struct drm_crtc_state *new_crtc_state; 9392 struct drm_framebuffer *fb = new_plane_state->fb; 9393 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 9394 bool plane_needs_flip; 9395 struct dc_plane_state *dc_plane; 9396 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 9397 9398 /* Cursor plane is handled after stream updates */ 9399 if (plane->type == DRM_PLANE_TYPE_CURSOR) 9400 continue; 9401 9402 if (!fb || !crtc || pcrtc != crtc) 9403 continue; 9404 9405 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 9406 if (!new_crtc_state->active) 9407 continue; 9408 9409 dc_plane = dm_new_plane_state->dc_state; 9410 9411 bundle->surface_updates[planes_count].surface = dc_plane; 9412 if (new_pcrtc_state->color_mgmt_changed) { 9413 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 9414 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 9415 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 9416 } 9417 9418 fill_dc_scaling_info(dm->adev, new_plane_state, 9419 &bundle->scaling_infos[planes_count]); 9420 9421 bundle->surface_updates[planes_count].scaling_info = 9422 &bundle->scaling_infos[planes_count]; 9423 9424 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 9425 9426 pflip_present = pflip_present || plane_needs_flip; 9427 9428 if (!plane_needs_flip) { 9429 planes_count += 1; 9430 continue; 9431 } 9432 9433 abo = gem_to_amdgpu_bo(fb->obj[0]); 9434 9435 /* 9436 * Wait for all fences on this FB. Do limited wait to avoid 9437 * deadlock during GPU reset when this fence will not signal 9438 * but we hold reservation lock for the BO. 9439 */ 9440 r = dma_resv_wait_timeout(abo->tbo.base.resv, 9441 DMA_RESV_USAGE_WRITE, false, 9442 msecs_to_jiffies(5000)); 9443 if (unlikely(r <= 0)) 9444 DRM_ERROR("Waiting for fences timed out!"); 9445 9446 fill_dc_plane_info_and_addr( 9447 dm->adev, new_plane_state, 9448 afb->tiling_flags, 9449 &bundle->plane_infos[planes_count], 9450 &bundle->flip_addrs[planes_count].address, 9451 afb->tmz_surface, false); 9452 9453 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", 9454 new_plane_state->plane->index, 9455 bundle->plane_infos[planes_count].dcc.enable); 9456 9457 bundle->surface_updates[planes_count].plane_info = 9458 &bundle->plane_infos[planes_count]; 9459 9460 fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, 9461 new_crtc_state, 9462 &bundle->flip_addrs[planes_count]); 9463 9464 /* 9465 * Only allow immediate flips for fast updates that don't 9466 * change FB pitch, DCC state, rotation or mirroing. 
9467 */ 9468 bundle->flip_addrs[planes_count].flip_immediate = 9469 crtc->state->async_flip && 9470 acrtc_state->update_type == UPDATE_TYPE_FAST; 9471 9472 timestamp_ns = ktime_get_ns(); 9473 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 9474 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 9475 bundle->surface_updates[planes_count].surface = dc_plane; 9476 9477 if (!bundle->surface_updates[planes_count].surface) { 9478 DRM_ERROR("No surface for CRTC: id=%d\n", 9479 acrtc_attach->crtc_id); 9480 continue; 9481 } 9482 9483 if (plane == pcrtc->primary) 9484 update_freesync_state_on_stream( 9485 dm, 9486 acrtc_state, 9487 acrtc_state->stream, 9488 dc_plane, 9489 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 9490 9491 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n", 9492 __func__, 9493 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 9494 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 9495 9496 planes_count += 1; 9497 9498 } 9499 9500 if (pflip_present) { 9501 if (!vrr_active) { 9502 /* Use old throttling in non-vrr fixed refresh rate mode 9503 * to keep flip scheduling based on target vblank counts 9504 * working in a backwards compatible way, e.g., for 9505 * clients using the GLX_OML_sync_control extension or 9506 * DRI3/Present extension with defined target_msc. 9507 */ 9508 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 9509 } 9510 else { 9511 /* For variable refresh rate mode only: 9512 * Get vblank of last completed flip to avoid > 1 vrr 9513 * flips per video frame by use of throttling, but allow 9514 * flip programming anywhere in the possibly large 9515 * variable vrr vblank interval for fine-grained flip 9516 * timing control and more opportunity to avoid stutter 9517 * on late submission of flips. 9518 */ 9519 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9520 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 9521 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9522 } 9523 9524 target_vblank = last_flip_vblank + wait_for_vblank; 9525 9526 /* 9527 * Wait until we're out of the vertical blank period before the one 9528 * targeted by the flip 9529 */ 9530 while ((acrtc_attach->enabled && 9531 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9532 0, &vpos, &hpos, NULL, 9533 NULL, &pcrtc->hwmode) 9534 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9535 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9536 (int)(target_vblank - 9537 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9538 usleep_range(1000, 1100); 9539 } 9540 9541 /** 9542 * Prepare the flip event for the pageflip interrupt to handle. 9543 * 9544 * This only works in the case where we've already turned on the 9545 * appropriate hardware blocks (eg. HUBP) so in the transition case 9546 * from 0 -> n planes we have to skip a hardware generated event 9547 * and rely on sending it from software. 
9548 */ 9549 if (acrtc_attach->base.state->event && 9550 acrtc_state->active_planes > 0) { 9551 drm_crtc_vblank_get(pcrtc); 9552 9553 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9554 9555 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9556 prepare_flip_isr(acrtc_attach); 9557 9558 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9559 } 9560 9561 if (acrtc_state->stream) { 9562 if (acrtc_state->freesync_vrr_info_changed) 9563 bundle->stream_update.vrr_infopacket = 9564 &acrtc_state->stream->vrr_infopacket; 9565 } 9566 } 9567 9568 /* Update the planes if changed or disable if we don't have any. */ 9569 if ((planes_count || acrtc_state->active_planes == 0) && 9570 acrtc_state->stream) { 9571 /* 9572 * If PSR or idle optimizations are enabled then flush out 9573 * any pending work before hardware programming. 9574 */ 9575 if (dm->vblank_control_workqueue) 9576 flush_workqueue(dm->vblank_control_workqueue); 9577 9578 bundle->stream_update.stream = acrtc_state->stream; 9579 if (new_pcrtc_state->mode_changed) { 9580 bundle->stream_update.src = acrtc_state->stream->src; 9581 bundle->stream_update.dst = acrtc_state->stream->dst; 9582 } 9583 9584 if (new_pcrtc_state->color_mgmt_changed) { 9585 /* 9586 * TODO: This isn't fully correct since we've actually 9587 * already modified the stream in place. 9588 */ 9589 bundle->stream_update.gamut_remap = 9590 &acrtc_state->stream->gamut_remap_matrix; 9591 bundle->stream_update.output_csc_transform = 9592 &acrtc_state->stream->csc_color_matrix; 9593 bundle->stream_update.out_transfer_func = 9594 acrtc_state->stream->out_transfer_func; 9595 } 9596 9597 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9598 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9599 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9600 9601 /* 9602 * If FreeSync state on the stream has changed then we need to 9603 * re-adjust the min/max bounds now that DC doesn't handle this 9604 * as part of commit. 9605 */ 9606 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9607 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9608 dc_stream_adjust_vmin_vmax( 9609 dm->dc, acrtc_state->stream, 9610 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9611 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9612 } 9613 mutex_lock(&dm->dc_lock); 9614 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 9615 acrtc_state->stream->link->psr_settings.psr_allow_active) 9616 amdgpu_dm_psr_disable(acrtc_state->stream); 9617 9618 dc_commit_updates_for_stream(dm->dc, 9619 bundle->surface_updates, 9620 planes_count, 9621 acrtc_state->stream, 9622 &bundle->stream_update, 9623 dc_state); 9624 9625 /** 9626 * Enable or disable the interrupts on the backend. 9627 * 9628 * Most pipes are put into power gating when unused. 9629 * 9630 * When power gating is enabled on a pipe we lose the 9631 * interrupt enablement state when power gating is disabled. 9632 * 9633 * So we need to update the IRQ control state in hardware 9634 * whenever the pipe turns on (since it could be previously 9635 * power gated) or off (since some pipes can't be power gated 9636 * on some ASICs). 
9637 */ 9638 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9639 dm_update_pflip_irq_state(drm_to_adev(dev), 9640 acrtc_attach); 9641 9642 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 9643 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 9644 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 9645 amdgpu_dm_link_setup_psr(acrtc_state->stream); 9646 9647 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 9648 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 9649 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 9650 struct amdgpu_dm_connector *aconn = 9651 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 9652 9653 if (aconn->psr_skip_count > 0) 9654 aconn->psr_skip_count--; 9655 9656 /* Allow PSR when skip count is 0. */ 9657 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 9658 9659 /* 9660 * If sink supports PSR SU, there is no need to rely on 9661 * a vblank event disable request to enable PSR. PSR SU 9662 * can be enabled immediately once OS demonstrates an 9663 * adequate number of fast atomic commits to notify KMD 9664 * of update events. See `vblank_control_worker()`. 9665 */ 9666 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 9667 acrtc_attach->dm_irq_params.allow_psr_entry && 9668 !acrtc_state->stream->link->psr_settings.psr_allow_active) 9669 amdgpu_dm_psr_enable(acrtc_state->stream); 9670 } else { 9671 acrtc_attach->dm_irq_params.allow_psr_entry = false; 9672 } 9673 9674 mutex_unlock(&dm->dc_lock); 9675 } 9676 9677 /* 9678 * Update cursor state *after* programming all the planes. 9679 * This avoids redundant programming in the case where we're going 9680 * to be disabling a single plane - those pipes are being disabled. 9681 */ 9682 if (acrtc_state->active_planes) 9683 amdgpu_dm_commit_cursors(state); 9684 9685 cleanup: 9686 kfree(bundle); 9687 } 9688 9689 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9690 struct drm_atomic_state *state) 9691 { 9692 struct amdgpu_device *adev = drm_to_adev(dev); 9693 struct amdgpu_dm_connector *aconnector; 9694 struct drm_connector *connector; 9695 struct drm_connector_state *old_con_state, *new_con_state; 9696 struct drm_crtc_state *new_crtc_state; 9697 struct dm_crtc_state *new_dm_crtc_state; 9698 const struct dc_stream_status *status; 9699 int i, inst; 9700 9701 /* Notify device removals. */ 9702 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9703 if (old_con_state->crtc != new_con_state->crtc) { 9704 /* CRTC changes require notification. */ 9705 goto notify; 9706 } 9707 9708 if (!new_con_state->crtc) 9709 continue; 9710 9711 new_crtc_state = drm_atomic_get_new_crtc_state( 9712 state, new_con_state->crtc); 9713 9714 if (!new_crtc_state) 9715 continue; 9716 9717 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9718 continue; 9719 9720 notify: 9721 aconnector = to_amdgpu_dm_connector(connector); 9722 9723 mutex_lock(&adev->dm.audio_lock); 9724 inst = aconnector->audio_inst; 9725 aconnector->audio_inst = -1; 9726 mutex_unlock(&adev->dm.audio_lock); 9727 9728 amdgpu_dm_audio_eld_notify(adev, inst); 9729 } 9730 9731 /* Notify audio device additions. 
 */
	for_each_new_connector_in_state(state, connector, new_con_state, i) {
		if (!new_con_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(
			state, new_con_state->crtc);

		if (!new_crtc_state)
			continue;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
		if (!new_dm_crtc_state->stream)
			continue;

		status = dc_stream_get_status(new_dm_crtc_state->stream);
		if (!status)
			continue;

		aconnector = to_amdgpu_dm_connector(connector);

		mutex_lock(&adev->dm.audio_lock);
		inst = status->audio_inst;
		aconnector->audio_inst = inst;
		mutex_unlock(&adev->dm.audio_lock);

		amdgpu_dm_audio_eld_notify(adev, inst);
	}
}

/*
 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
 * @crtc_state: the DRM CRTC state
 * @stream_state: the DC stream state.
 *
 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
 */
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
						struct dc_stream_state *stream_state)
{
	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}

/**
 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
 * @state: The atomic state to commit
 *
 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
 * atomic check should have filtered anything non-kosher.
 */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dm_atomic_state *dm_state;
	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
	uint32_t i, j;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	unsigned long flags;
	bool wait_for_vblank = true;
	struct drm_connector *connector;
	struct drm_connector_state *old_con_state, *new_con_state;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	int crtc_disable_count = 0;
	bool mode_set_reset_required = false;

	trace_amdgpu_dm_atomic_commit_tail_begin(state);

	drm_atomic_helper_update_legacy_modeset_state(dev, state);

	dm_state = dm_atomic_get_new_state(state);
	if (dm_state && dm_state->context) {
		dc_state = dm_state->context;
	} else {
		/* No state changes, retain current state.
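		 * A scratch dc_state is built from the current resource
		 * state so the commit below still has a context to apply.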
*/ 9813 dc_state_temp = dc_create_state(dm->dc); 9814 ASSERT(dc_state_temp); 9815 dc_state = dc_state_temp; 9816 dc_resource_state_copy_construct_current(dm->dc, dc_state); 9817 } 9818 9819 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state, 9820 new_crtc_state, i) { 9821 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9822 9823 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9824 9825 if (old_crtc_state->active && 9826 (!new_crtc_state->active || 9827 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9828 manage_dm_interrupts(adev, acrtc, false); 9829 dc_stream_release(dm_old_crtc_state->stream); 9830 } 9831 } 9832 9833 drm_atomic_helper_calc_timestamping_constants(state); 9834 9835 /* update changed items */ 9836 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9837 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9838 9839 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9840 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9841 9842 drm_dbg_state(state->dev, 9843 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 9844 "planes_changed:%d, mode_changed:%d,active_changed:%d," 9845 "connectors_changed:%d\n", 9846 acrtc->crtc_id, 9847 new_crtc_state->enable, 9848 new_crtc_state->active, 9849 new_crtc_state->planes_changed, 9850 new_crtc_state->mode_changed, 9851 new_crtc_state->active_changed, 9852 new_crtc_state->connectors_changed); 9853 9854 /* Disable cursor if disabling crtc */ 9855 if (old_crtc_state->active && !new_crtc_state->active) { 9856 struct dc_cursor_position position; 9857 9858 memset(&position, 0, sizeof(position)); 9859 mutex_lock(&dm->dc_lock); 9860 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); 9861 mutex_unlock(&dm->dc_lock); 9862 } 9863 9864 /* Copy all transient state flags into dc state */ 9865 if (dm_new_crtc_state->stream) { 9866 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 9867 dm_new_crtc_state->stream); 9868 } 9869 9870 /* handles headless hotplug case, updating new_state and 9871 * aconnector as needed 9872 */ 9873 9874 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 9875 9876 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 9877 9878 if (!dm_new_crtc_state->stream) { 9879 /* 9880 * this could happen because of issues with 9881 * userspace notifications delivery. 9882 * In this case userspace tries to set mode on 9883 * display which is disconnected in fact. 9884 * dc_sink is NULL in this case on aconnector. 9885 * We expect reset mode will come soon. 9886 * 9887 * This can also happen when unplug is done 9888 * during resume sequence ended 9889 * 9890 * In this case, we want to pretend we still 9891 * have a sink to keep the pipe running so that 9892 * hw state is consistent with the sw state 9893 */ 9894 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 9895 __func__, acrtc->base.base.id); 9896 continue; 9897 } 9898 9899 if (dm_old_crtc_state->stream) 9900 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9901 9902 pm_runtime_get_noresume(dev->dev); 9903 9904 acrtc->enabled = true; 9905 acrtc->hw_mode = new_crtc_state->mode; 9906 crtc->hwmode = new_crtc_state->mode; 9907 mode_set_reset_required = true; 9908 } else if (modereset_required(new_crtc_state)) { 9909 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); 9910 /* i.e. 
reset mode */ 9911 if (dm_old_crtc_state->stream) 9912 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9913 9914 mode_set_reset_required = true; 9915 } 9916 } /* for_each_crtc_in_state() */ 9917 9918 if (dc_state) { 9919 /* if there mode set or reset, disable eDP PSR */ 9920 if (mode_set_reset_required) { 9921 if (dm->vblank_control_workqueue) 9922 flush_workqueue(dm->vblank_control_workqueue); 9923 9924 amdgpu_dm_psr_disable_all(dm); 9925 } 9926 9927 dm_enable_per_frame_crtc_master_sync(dc_state); 9928 mutex_lock(&dm->dc_lock); 9929 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 9930 9931 /* Allow idle optimization when vblank count is 0 for display off */ 9932 if (dm->active_vblank_irq_count == 0) 9933 dc_allow_idle_optimizations(dm->dc, true); 9934 mutex_unlock(&dm->dc_lock); 9935 } 9936 9937 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9938 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9939 9940 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9941 9942 if (dm_new_crtc_state->stream != NULL) { 9943 const struct dc_stream_status *status = 9944 dc_stream_get_status(dm_new_crtc_state->stream); 9945 9946 if (!status) 9947 status = dc_stream_get_status_from_state(dc_state, 9948 dm_new_crtc_state->stream); 9949 if (!status) 9950 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 9951 else 9952 acrtc->otg_inst = status->primary_otg_inst; 9953 } 9954 } 9955 #ifdef CONFIG_DRM_AMD_DC_HDCP 9956 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9957 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9958 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9959 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9960 9961 new_crtc_state = NULL; 9962 9963 if (acrtc) 9964 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9965 9966 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9967 9968 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 9969 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 9970 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 9971 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9972 dm_new_con_state->update_hdcp = true; 9973 continue; 9974 } 9975 9976 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) 9977 hdcp_update_display( 9978 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 9979 new_con_state->hdcp_content_type, 9980 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); 9981 } 9982 #endif 9983 9984 /* Handle connector state changes */ 9985 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9986 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9987 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 9988 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9989 struct dc_surface_update dummy_updates[MAX_SURFACES]; 9990 struct dc_stream_update stream_update; 9991 struct dc_info_packet hdr_packet; 9992 struct dc_stream_status *status = NULL; 9993 bool abm_changed, hdr_changed, scaling_changed; 9994 9995 memset(&dummy_updates, 0, sizeof(dummy_updates)); 9996 memset(&stream_update, 0, sizeof(stream_update)); 9997 9998 if (acrtc) { 9999 new_crtc_state = 
drm_atomic_get_new_crtc_state(state, &acrtc->base); 10000 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 10001 } 10002 10003 /* Skip any modesets/resets */ 10004 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 10005 continue; 10006 10007 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10008 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10009 10010 scaling_changed = is_scaling_state_different(dm_new_con_state, 10011 dm_old_con_state); 10012 10013 abm_changed = dm_new_crtc_state->abm_level != 10014 dm_old_crtc_state->abm_level; 10015 10016 hdr_changed = 10017 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 10018 10019 if (!scaling_changed && !abm_changed && !hdr_changed) 10020 continue; 10021 10022 stream_update.stream = dm_new_crtc_state->stream; 10023 if (scaling_changed) { 10024 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 10025 dm_new_con_state, dm_new_crtc_state->stream); 10026 10027 stream_update.src = dm_new_crtc_state->stream->src; 10028 stream_update.dst = dm_new_crtc_state->stream->dst; 10029 } 10030 10031 if (abm_changed) { 10032 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 10033 10034 stream_update.abm_level = &dm_new_crtc_state->abm_level; 10035 } 10036 10037 if (hdr_changed) { 10038 fill_hdr_info_packet(new_con_state, &hdr_packet); 10039 stream_update.hdr_static_metadata = &hdr_packet; 10040 } 10041 10042 status = dc_stream_get_status(dm_new_crtc_state->stream); 10043 10044 if (WARN_ON(!status)) 10045 continue; 10046 10047 WARN_ON(!status->plane_count); 10048 10049 /* 10050 * TODO: DC refuses to perform stream updates without a dc_surface_update. 10051 * Here we create an empty update on each plane. 10052 * To fix this, DC should permit updating only stream properties. 10053 */ 10054 for (j = 0; j < status->plane_count; j++) 10055 dummy_updates[j].surface = status->plane_states[0]; 10056 10057 10058 mutex_lock(&dm->dc_lock); 10059 dc_commit_updates_for_stream(dm->dc, 10060 dummy_updates, 10061 status->plane_count, 10062 dm_new_crtc_state->stream, 10063 &stream_update, 10064 dc_state); 10065 mutex_unlock(&dm->dc_lock); 10066 } 10067 10068 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 10069 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 10070 new_crtc_state, i) { 10071 if (old_crtc_state->active && !new_crtc_state->active) 10072 crtc_disable_count++; 10073 10074 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10075 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10076 10077 /* For freesync config update on crtc state and params for irq */ 10078 update_stream_irq_parameters(dm, dm_new_crtc_state); 10079 10080 /* Handle vrr on->off / off->on transitions */ 10081 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, 10082 dm_new_crtc_state); 10083 } 10084 10085 /** 10086 * Enable interrupts for CRTCs that are newly enabled or went through 10087 * a modeset. It was intentionally deferred until after the front end 10088 * state was modified to wait until the OTG was on and so the IRQ 10089 * handlers didn't access stale or invalid state. 
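	 * The CRC capture configuration is also reapplied in this loop,
	 * since the frontend may have changed.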
10090 */ 10091 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10092 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 10093 #ifdef CONFIG_DEBUG_FS 10094 bool configure_crc = false; 10095 enum amdgpu_dm_pipe_crc_source cur_crc_src; 10096 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10097 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk; 10098 #endif 10099 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10100 cur_crc_src = acrtc->dm_irq_params.crc_src; 10101 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10102 #endif 10103 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10104 10105 if (new_crtc_state->active && 10106 (!old_crtc_state->active || 10107 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 10108 dc_stream_retain(dm_new_crtc_state->stream); 10109 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 10110 manage_dm_interrupts(adev, acrtc, true); 10111 10112 #ifdef CONFIG_DEBUG_FS 10113 /** 10114 * Frontend may have changed so reapply the CRC capture 10115 * settings for the stream. 10116 */ 10117 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10118 10119 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 10120 configure_crc = true; 10121 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 10122 if (amdgpu_dm_crc_window_is_activated(crtc)) { 10123 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10124 acrtc->dm_irq_params.crc_window.update_win = true; 10125 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2; 10126 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); 10127 crc_rd_wrk->crtc = crtc; 10128 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 10129 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10130 } 10131 #endif 10132 } 10133 10134 if (configure_crc) 10135 if (amdgpu_dm_crtc_configure_crc_source( 10136 crtc, dm_new_crtc_state, cur_crc_src)) 10137 DRM_DEBUG_DRIVER("Failed to configure crc source"); 10138 #endif 10139 } 10140 } 10141 10142 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 10143 if (new_crtc_state->async_flip) 10144 wait_for_vblank = false; 10145 10146 /* update planes when needed per crtc*/ 10147 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 10148 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10149 10150 if (dm_new_crtc_state->stream) 10151 amdgpu_dm_commit_planes(state, dc_state, dev, 10152 dm, crtc, wait_for_vblank); 10153 } 10154 10155 /* Update audio instances for each connector. 
*/ 10156 amdgpu_dm_commit_audio(dev, state); 10157 10158 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ 10159 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 10160 /* restore the backlight level */ 10161 for (i = 0; i < dm->num_of_edps; i++) { 10162 if (dm->backlight_dev[i] && 10163 (dm->actual_brightness[i] != dm->brightness[i])) 10164 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10165 } 10166 #endif 10167 /* 10168 * send vblank event on all events not handled in flip and 10169 * mark consumed event for drm_atomic_helper_commit_hw_done 10170 */ 10171 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 10172 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10173 10174 if (new_crtc_state->event) 10175 drm_send_event_locked(dev, &new_crtc_state->event->base); 10176 10177 new_crtc_state->event = NULL; 10178 } 10179 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 10180 10181 /* Signal HW programming completion */ 10182 drm_atomic_helper_commit_hw_done(state); 10183 10184 if (wait_for_vblank) 10185 drm_atomic_helper_wait_for_flip_done(dev, state); 10186 10187 drm_atomic_helper_cleanup_planes(dev, state); 10188 10189 /* return the stolen vga memory back to VRAM */ 10190 if (!adev->mman.keep_stolen_vga_memory) 10191 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 10192 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 10193 10194 /* 10195 * Finally, drop a runtime PM reference for each newly disabled CRTC, 10196 * so we can put the GPU into runtime suspend if we're not driving any 10197 * displays anymore 10198 */ 10199 for (i = 0; i < crtc_disable_count; i++) 10200 pm_runtime_put_autosuspend(dev->dev); 10201 pm_runtime_mark_last_busy(dev->dev); 10202 10203 if (dc_state_temp) 10204 dc_release_state(dc_state_temp); 10205 } 10206 10207 10208 static int dm_force_atomic_commit(struct drm_connector *connector) 10209 { 10210 int ret = 0; 10211 struct drm_device *ddev = connector->dev; 10212 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 10213 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 10214 struct drm_plane *plane = disconnected_acrtc->base.primary; 10215 struct drm_connector_state *conn_state; 10216 struct drm_crtc_state *crtc_state; 10217 struct drm_plane_state *plane_state; 10218 10219 if (!state) 10220 return -ENOMEM; 10221 10222 state->acquire_ctx = ddev->mode_config.acquire_ctx; 10223 10224 /* Construct an atomic state to restore previous display setting */ 10225 10226 /* 10227 * Attach connectors to drm_atomic_state 10228 */ 10229 conn_state = drm_atomic_get_connector_state(state, connector); 10230 10231 ret = PTR_ERR_OR_ZERO(conn_state); 10232 if (ret) 10233 goto out; 10234 10235 /* Attach crtc to drm_atomic_state*/ 10236 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 10237 10238 ret = PTR_ERR_OR_ZERO(crtc_state); 10239 if (ret) 10240 goto out; 10241 10242 /* force a restore */ 10243 crtc_state->mode_changed = true; 10244 10245 /* Attach plane to drm_atomic_state */ 10246 plane_state = drm_atomic_get_plane_state(state, plane); 10247 10248 ret = PTR_ERR_OR_ZERO(plane_state); 10249 if (ret) 10250 goto out; 10251 10252 /* Call commit internally with the state we just constructed */ 10253 ret = drm_atomic_commit(state); 10254 10255 out: 10256 drm_atomic_state_put(state); 10257 if (ret) 10258 DRM_ERROR("Restoring old state failed with %i\n", ret); 10259 10260 return ret; 10261 } 10262 10263 /* 10264 * This function handles all cases 
where a mode set does not follow a hotplug.
 * This includes when a display is unplugged and then plugged back into the
 * same port, and when running without usermode desktop manager support.
 */
void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_crtc *disconnected_acrtc;
	struct dm_crtc_state *acrtc_state;

	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
		return;

	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
	if (!disconnected_acrtc)
		return;

	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
	if (!acrtc_state->stream)
		return;

	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce that we cannot rely on a usermode call to
	 * turn on the display, so we do it here.
	 */
	if (acrtc_state->stream->sink != aconnector->dc_sink)
		dm_force_atomic_commit(&aconnector->base);
}

/*
 * Grabs all modesetting locks to serialize against any blocking commits and
 * waits for completion of all non-blocking commits.
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Adding all modeset locks to acquire_ctx ensures that, when the
	 * framework releases it, the extra locks taken here are released
	 * along with it.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming has completed and all
		 * page flips are done.
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	return ret < 0 ?
ret : 0; 10344 } 10345 10346 static void get_freesync_config_for_crtc( 10347 struct dm_crtc_state *new_crtc_state, 10348 struct dm_connector_state *new_con_state) 10349 { 10350 struct mod_freesync_config config = {0}; 10351 struct amdgpu_dm_connector *aconnector = 10352 to_amdgpu_dm_connector(new_con_state->base.connector); 10353 struct drm_display_mode *mode = &new_crtc_state->base.mode; 10354 int vrefresh = drm_mode_vrefresh(mode); 10355 bool fs_vid_mode = false; 10356 10357 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 10358 vrefresh >= aconnector->min_vfreq && 10359 vrefresh <= aconnector->max_vfreq; 10360 10361 if (new_crtc_state->vrr_supported) { 10362 new_crtc_state->stream->ignore_msa_timing_param = true; 10363 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 10364 10365 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 10366 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 10367 config.vsif_supported = true; 10368 config.btr = true; 10369 10370 if (fs_vid_mode) { 10371 config.state = VRR_STATE_ACTIVE_FIXED; 10372 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 10373 goto out; 10374 } else if (new_crtc_state->base.vrr_enabled) { 10375 config.state = VRR_STATE_ACTIVE_VARIABLE; 10376 } else { 10377 config.state = VRR_STATE_INACTIVE; 10378 } 10379 } 10380 out: 10381 new_crtc_state->freesync_config = config; 10382 } 10383 10384 static void reset_freesync_config_for_crtc( 10385 struct dm_crtc_state *new_crtc_state) 10386 { 10387 new_crtc_state->vrr_supported = false; 10388 10389 memset(&new_crtc_state->vrr_infopacket, 0, 10390 sizeof(new_crtc_state->vrr_infopacket)); 10391 } 10392 10393 static bool 10394 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 10395 struct drm_crtc_state *new_crtc_state) 10396 { 10397 const struct drm_display_mode *old_mode, *new_mode; 10398 10399 if (!old_crtc_state || !new_crtc_state) 10400 return false; 10401 10402 old_mode = &old_crtc_state->mode; 10403 new_mode = &new_crtc_state->mode; 10404 10405 if (old_mode->clock == new_mode->clock && 10406 old_mode->hdisplay == new_mode->hdisplay && 10407 old_mode->vdisplay == new_mode->vdisplay && 10408 old_mode->htotal == new_mode->htotal && 10409 old_mode->vtotal != new_mode->vtotal && 10410 old_mode->hsync_start == new_mode->hsync_start && 10411 old_mode->vsync_start != new_mode->vsync_start && 10412 old_mode->hsync_end == new_mode->hsync_end && 10413 old_mode->vsync_end != new_mode->vsync_end && 10414 old_mode->hskew == new_mode->hskew && 10415 old_mode->vscan == new_mode->vscan && 10416 (old_mode->vsync_end - old_mode->vsync_start) == 10417 (new_mode->vsync_end - new_mode->vsync_start)) 10418 return true; 10419 10420 return false; 10421 } 10422 10423 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { 10424 uint64_t num, den, res; 10425 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 10426 10427 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 10428 10429 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 10430 den = (unsigned long long)new_crtc_state->mode.htotal * 10431 (unsigned long long)new_crtc_state->mode.vtotal; 10432 10433 res = div_u64(num, den); 10434 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 10435 } 10436 10437 static int dm_update_crtc_state(struct amdgpu_display_manager *dm, 10438 struct drm_atomic_state *state, 10439 struct drm_crtc *crtc, 10440 struct 
drm_crtc_state *old_crtc_state, 10441 struct drm_crtc_state *new_crtc_state, 10442 bool enable, 10443 bool *lock_and_validation_needed) 10444 { 10445 struct dm_atomic_state *dm_state = NULL; 10446 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 10447 struct dc_stream_state *new_stream; 10448 int ret = 0; 10449 10450 /* 10451 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 10452 * update changed items 10453 */ 10454 struct amdgpu_crtc *acrtc = NULL; 10455 struct amdgpu_dm_connector *aconnector = NULL; 10456 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 10457 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 10458 10459 new_stream = NULL; 10460 10461 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10462 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10463 acrtc = to_amdgpu_crtc(crtc); 10464 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 10465 10466 /* TODO This hack should go away */ 10467 if (aconnector && enable) { 10468 /* Make sure fake sink is created in plug-in scenario */ 10469 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 10470 &aconnector->base); 10471 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 10472 &aconnector->base); 10473 10474 if (IS_ERR(drm_new_conn_state)) { 10475 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 10476 goto fail; 10477 } 10478 10479 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 10480 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 10481 10482 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10483 goto skip_modeset; 10484 10485 new_stream = create_validate_stream_for_sink(aconnector, 10486 &new_crtc_state->mode, 10487 dm_new_conn_state, 10488 dm_old_crtc_state->stream); 10489 10490 /* 10491 * we can have no stream on ACTION_SET if a display 10492 * was disconnected during S3, in this case it is not an 10493 * error, the OS will be updated after detection, and 10494 * will do the right thing on next atomic commit 10495 */ 10496 10497 if (!new_stream) { 10498 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 10499 __func__, acrtc->base.base.id); 10500 ret = -ENOMEM; 10501 goto fail; 10502 } 10503 10504 /* 10505 * TODO: Check VSDB bits to decide whether this should 10506 * be enabled or not. 10507 */ 10508 new_stream->triggered_crtc_reset.enabled = 10509 dm->force_timing_sync; 10510 10511 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10512 10513 ret = fill_hdr_info_packet(drm_new_conn_state, 10514 &new_stream->hdr_static_metadata); 10515 if (ret) 10516 goto fail; 10517 10518 /* 10519 * If we already removed the old stream from the context 10520 * (and set the new stream to NULL) then we can't reuse 10521 * the old stream even if the stream and scaling are unchanged. 10522 * We'll hit the BUG_ON and black screen. 10523 * 10524 * TODO: Refactor this function to allow this check to work 10525 * in all conditions. 
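		 * For now the fast path below is only taken while
		 * dm_new_crtc_state->stream is still non-NULL.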
10526 */ 10527 if (dm_new_crtc_state->stream && 10528 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 10529 goto skip_modeset; 10530 10531 if (dm_new_crtc_state->stream && 10532 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 10533 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 10534 new_crtc_state->mode_changed = false; 10535 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 10536 new_crtc_state->mode_changed); 10537 } 10538 } 10539 10540 /* mode_changed flag may get updated above, need to check again */ 10541 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10542 goto skip_modeset; 10543 10544 drm_dbg_state(state->dev, 10545 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 10546 "planes_changed:%d, mode_changed:%d,active_changed:%d," 10547 "connectors_changed:%d\n", 10548 acrtc->crtc_id, 10549 new_crtc_state->enable, 10550 new_crtc_state->active, 10551 new_crtc_state->planes_changed, 10552 new_crtc_state->mode_changed, 10553 new_crtc_state->active_changed, 10554 new_crtc_state->connectors_changed); 10555 10556 /* Remove stream for any changed/disabled CRTC */ 10557 if (!enable) { 10558 10559 if (!dm_old_crtc_state->stream) 10560 goto skip_modeset; 10561 10562 if (dm_new_crtc_state->stream && 10563 is_timing_unchanged_for_freesync(new_crtc_state, 10564 old_crtc_state)) { 10565 new_crtc_state->mode_changed = false; 10566 DRM_DEBUG_DRIVER( 10567 "Mode change not required for front porch change, " 10568 "setting mode_changed to %d", 10569 new_crtc_state->mode_changed); 10570 10571 set_freesync_fixed_config(dm_new_crtc_state); 10572 10573 goto skip_modeset; 10574 } else if (aconnector && 10575 is_freesync_video_mode(&new_crtc_state->mode, 10576 aconnector)) { 10577 struct drm_display_mode *high_mode; 10578 10579 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10580 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { 10581 set_freesync_fixed_config(dm_new_crtc_state); 10582 } 10583 } 10584 10585 ret = dm_atomic_get_state(state, &dm_state); 10586 if (ret) 10587 goto fail; 10588 10589 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 10590 crtc->base.id); 10591 10592 /* i.e. 
reset mode */ 10593 if (dc_remove_stream_from_ctx( 10594 dm->dc, 10595 dm_state->context, 10596 dm_old_crtc_state->stream) != DC_OK) { 10597 ret = -EINVAL; 10598 goto fail; 10599 } 10600 10601 dc_stream_release(dm_old_crtc_state->stream); 10602 dm_new_crtc_state->stream = NULL; 10603 10604 reset_freesync_config_for_crtc(dm_new_crtc_state); 10605 10606 *lock_and_validation_needed = true; 10607 10608 } else {/* Add stream for any updated/enabled CRTC */ 10609 /* 10610 * Quick fix to prevent NULL pointer on new_stream when 10611 * added MST connectors not found in existing crtc_state in the chained mode 10612 * TODO: need to dig out the root cause of that 10613 */ 10614 if (!aconnector) 10615 goto skip_modeset; 10616 10617 if (modereset_required(new_crtc_state)) 10618 goto skip_modeset; 10619 10620 if (modeset_required(new_crtc_state, new_stream, 10621 dm_old_crtc_state->stream)) { 10622 10623 WARN_ON(dm_new_crtc_state->stream); 10624 10625 ret = dm_atomic_get_state(state, &dm_state); 10626 if (ret) 10627 goto fail; 10628 10629 dm_new_crtc_state->stream = new_stream; 10630 10631 dc_stream_retain(new_stream); 10632 10633 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 10634 crtc->base.id); 10635 10636 if (dc_add_stream_to_ctx( 10637 dm->dc, 10638 dm_state->context, 10639 dm_new_crtc_state->stream) != DC_OK) { 10640 ret = -EINVAL; 10641 goto fail; 10642 } 10643 10644 *lock_and_validation_needed = true; 10645 } 10646 } 10647 10648 skip_modeset: 10649 /* Release extra reference */ 10650 if (new_stream) 10651 dc_stream_release(new_stream); 10652 10653 /* 10654 * We want to do dc stream updates that do not require a 10655 * full modeset below. 10656 */ 10657 if (!(enable && aconnector && new_crtc_state->active)) 10658 return 0; 10659 /* 10660 * Given above conditions, the dc state cannot be NULL because: 10661 * 1. We're in the process of enabling CRTCs (just been added 10662 * to the dc context, or already is on the context) 10663 * 2. Has a valid connector attached, and 10664 * 3. Is currently active and enabled. 10665 * => The dc stream state currently exists. 10666 */ 10667 BUG_ON(dm_new_crtc_state->stream == NULL); 10668 10669 /* Scaling or underscan settings */ 10670 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 10671 drm_atomic_crtc_needs_modeset(new_crtc_state)) 10672 update_stream_scaling_settings( 10673 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 10674 10675 /* ABM settings */ 10676 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10677 10678 /* 10679 * Color management settings. We also update color properties 10680 * when a modeset is needed, to ensure it gets reprogrammed. 10681 */ 10682 if (dm_new_crtc_state->base.color_mgmt_changed || 10683 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10684 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 10685 if (ret) 10686 goto fail; 10687 } 10688 10689 /* Update Freesync settings. 
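	 * The config computed here is later copied into the per-CRTC IRQ
	 * parameters during commit (see update_stream_irq_parameters()).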
*/ 10690 get_freesync_config_for_crtc(dm_new_crtc_state, 10691 dm_new_conn_state); 10692 10693 return ret; 10694 10695 fail: 10696 if (new_stream) 10697 dc_stream_release(new_stream); 10698 return ret; 10699 } 10700 10701 static bool should_reset_plane(struct drm_atomic_state *state, 10702 struct drm_plane *plane, 10703 struct drm_plane_state *old_plane_state, 10704 struct drm_plane_state *new_plane_state) 10705 { 10706 struct drm_plane *other; 10707 struct drm_plane_state *old_other_state, *new_other_state; 10708 struct drm_crtc_state *new_crtc_state; 10709 int i; 10710 10711 /* 10712 * TODO: Remove this hack once the checks below are sufficient 10713 * enough to determine when we need to reset all the planes on 10714 * the stream. 10715 */ 10716 if (state->allow_modeset) 10717 return true; 10718 10719 /* Exit early if we know that we're adding or removing the plane. */ 10720 if (old_plane_state->crtc != new_plane_state->crtc) 10721 return true; 10722 10723 /* old crtc == new_crtc == NULL, plane not in context. */ 10724 if (!new_plane_state->crtc) 10725 return false; 10726 10727 new_crtc_state = 10728 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 10729 10730 if (!new_crtc_state) 10731 return true; 10732 10733 /* CRTC Degamma changes currently require us to recreate planes. */ 10734 if (new_crtc_state->color_mgmt_changed) 10735 return true; 10736 10737 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 10738 return true; 10739 10740 /* 10741 * If there are any new primary or overlay planes being added or 10742 * removed then the z-order can potentially change. To ensure 10743 * correct z-order and pipe acquisition the current DC architecture 10744 * requires us to remove and recreate all existing planes. 10745 * 10746 * TODO: Come up with a more elegant solution for this. 10747 */ 10748 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 10749 struct amdgpu_framebuffer *old_afb, *new_afb; 10750 if (other->type == DRM_PLANE_TYPE_CURSOR) 10751 continue; 10752 10753 if (old_other_state->crtc != new_plane_state->crtc && 10754 new_other_state->crtc != new_plane_state->crtc) 10755 continue; 10756 10757 if (old_other_state->crtc != new_other_state->crtc) 10758 return true; 10759 10760 /* Src/dst size and scaling updates. */ 10761 if (old_other_state->src_w != new_other_state->src_w || 10762 old_other_state->src_h != new_other_state->src_h || 10763 old_other_state->crtc_w != new_other_state->crtc_w || 10764 old_other_state->crtc_h != new_other_state->crtc_h) 10765 return true; 10766 10767 /* Rotation / mirroring updates. */ 10768 if (old_other_state->rotation != new_other_state->rotation) 10769 return true; 10770 10771 /* Blending updates. */ 10772 if (old_other_state->pixel_blend_mode != 10773 new_other_state->pixel_blend_mode) 10774 return true; 10775 10776 /* Alpha updates. */ 10777 if (old_other_state->alpha != new_other_state->alpha) 10778 return true; 10779 10780 /* Colorspace changes. */ 10781 if (old_other_state->color_range != new_other_state->color_range || 10782 old_other_state->color_encoding != new_other_state->color_encoding) 10783 return true; 10784 10785 /* Framebuffer checks fall at the end. */ 10786 if (!old_other_state->fb || !new_other_state->fb) 10787 continue; 10788 10789 /* Pixel format changes can require bandwidth updates. 
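		 * A format mismatch between the old and new FB therefore
		 * forces a plane reset.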
*/ 10790 if (old_other_state->fb->format != new_other_state->fb->format) 10791 return true; 10792 10793 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 10794 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 10795 10796 /* Tiling and DCC changes also require bandwidth updates. */ 10797 if (old_afb->tiling_flags != new_afb->tiling_flags || 10798 old_afb->base.modifier != new_afb->base.modifier) 10799 return true; 10800 } 10801 10802 return false; 10803 } 10804 10805 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 10806 struct drm_plane_state *new_plane_state, 10807 struct drm_framebuffer *fb) 10808 { 10809 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 10810 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 10811 unsigned int pitch; 10812 bool linear; 10813 10814 if (fb->width > new_acrtc->max_cursor_width || 10815 fb->height > new_acrtc->max_cursor_height) { 10816 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 10817 new_plane_state->fb->width, 10818 new_plane_state->fb->height); 10819 return -EINVAL; 10820 } 10821 if (new_plane_state->src_w != fb->width << 16 || 10822 new_plane_state->src_h != fb->height << 16) { 10823 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10824 return -EINVAL; 10825 } 10826 10827 /* Pitch in pixels */ 10828 pitch = fb->pitches[0] / fb->format->cpp[0]; 10829 10830 if (fb->width != pitch) { 10831 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10832 fb->width, pitch); 10833 return -EINVAL; 10834 } 10835 10836 switch (pitch) { 10837 case 64: 10838 case 128: 10839 case 256: 10840 /* FB pitch is supported by cursor plane */ 10841 break; 10842 default: 10843 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10844 return -EINVAL; 10845 } 10846 10847 /* Core DRM takes care of checking FB modifiers, so we only need to 10848 * check tiling flags when the FB doesn't have a modifier. 
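 * Descriptive note on the check below: pre-AI families are treated as linear
 * when the array mode is neither 1D- nor 2D-tiled-thin1 and the micro-tile
 * mode is zero; newer families are treated as linear when the swizzle mode
 * is zero. In both cases a non-linear cursor surface is rejected.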
*/ 10849 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10850 if (adev->family < AMDGPU_FAMILY_AI) { 10851 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10852 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10853 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10854 } else { 10855 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10856 } 10857 if (!linear) { 10858 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10859 return -EINVAL; 10860 } 10861 } 10862 10863 return 0; 10864 } 10865 10866 static int dm_update_plane_state(struct dc *dc, 10867 struct drm_atomic_state *state, 10868 struct drm_plane *plane, 10869 struct drm_plane_state *old_plane_state, 10870 struct drm_plane_state *new_plane_state, 10871 bool enable, 10872 bool *lock_and_validation_needed) 10873 { 10874 10875 struct dm_atomic_state *dm_state = NULL; 10876 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10877 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10878 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10879 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10880 struct amdgpu_crtc *new_acrtc; 10881 bool needs_reset; 10882 int ret = 0; 10883 10884 10885 new_plane_crtc = new_plane_state->crtc; 10886 old_plane_crtc = old_plane_state->crtc; 10887 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10888 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10889 10890 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 10891 if (!enable || !new_plane_crtc || 10892 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10893 return 0; 10894 10895 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10896 10897 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10898 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10899 return -EINVAL; 10900 } 10901 10902 if (new_plane_state->fb) { 10903 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 10904 new_plane_state->fb); 10905 if (ret) 10906 return ret; 10907 } 10908 10909 return 0; 10910 } 10911 10912 needs_reset = should_reset_plane(state, plane, old_plane_state, 10913 new_plane_state); 10914 10915 /* Remove any changed/removed planes */ 10916 if (!enable) { 10917 if (!needs_reset) 10918 return 0; 10919 10920 if (!old_plane_crtc) 10921 return 0; 10922 10923 old_crtc_state = drm_atomic_get_old_crtc_state( 10924 state, old_plane_crtc); 10925 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10926 10927 if (!dm_old_crtc_state->stream) 10928 return 0; 10929 10930 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10931 plane->base.id, old_plane_crtc->base.id); 10932 10933 ret = dm_atomic_get_state(state, &dm_state); 10934 if (ret) 10935 return ret; 10936 10937 if (!dc_remove_plane_from_context( 10938 dc, 10939 dm_old_crtc_state->stream, 10940 dm_old_plane_state->dc_state, 10941 dm_state->context)) { 10942 10943 return -EINVAL; 10944 } 10945 10946 10947 dc_plane_state_release(dm_old_plane_state->dc_state); 10948 dm_new_plane_state->dc_state = NULL; 10949 10950 *lock_and_validation_needed = true; 10951 10952 } else { /* Add new planes */ 10953 struct dc_plane_state *dc_new_plane_state; 10954 10955 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 10956 return 0; 10957 10958 if (!new_plane_crtc) 10959 return 0; 10960 10961 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 10962 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10963 10964 if (!dm_new_crtc_state->stream) 10965 
return 0; 10966 10967 if (!needs_reset) 10968 return 0; 10969 10970 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 10971 if (ret) 10972 return ret; 10973 10974 WARN_ON(dm_new_plane_state->dc_state); 10975 10976 dc_new_plane_state = dc_create_plane_state(dc); 10977 if (!dc_new_plane_state) 10978 return -ENOMEM; 10979 10980 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 10981 plane->base.id, new_plane_crtc->base.id); 10982 10983 ret = fill_dc_plane_attributes( 10984 drm_to_adev(new_plane_crtc->dev), 10985 dc_new_plane_state, 10986 new_plane_state, 10987 new_crtc_state); 10988 if (ret) { 10989 dc_plane_state_release(dc_new_plane_state); 10990 return ret; 10991 } 10992 10993 ret = dm_atomic_get_state(state, &dm_state); 10994 if (ret) { 10995 dc_plane_state_release(dc_new_plane_state); 10996 return ret; 10997 } 10998 10999 /* 11000 * Any atomic check errors that occur after this will 11001 * not need a release. The plane state will be attached 11002 * to the stream, and therefore part of the atomic 11003 * state. It'll be released when the atomic state is 11004 * cleaned. 11005 */ 11006 if (!dc_add_plane_to_context( 11007 dc, 11008 dm_new_crtc_state->stream, 11009 dc_new_plane_state, 11010 dm_state->context)) { 11011 11012 dc_plane_state_release(dc_new_plane_state); 11013 return -EINVAL; 11014 } 11015 11016 dm_new_plane_state->dc_state = dc_new_plane_state; 11017 11018 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); 11019 11020 /* Tell DC to do a full surface update every time there 11021 * is a plane change. Inefficient, but works for now. 11022 */ 11023 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 11024 11025 *lock_and_validation_needed = true; 11026 } 11027 11028 11029 return ret; 11030 } 11031 11032 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, 11033 int *src_w, int *src_h) 11034 { 11035 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 11036 case DRM_MODE_ROTATE_90: 11037 case DRM_MODE_ROTATE_270: 11038 *src_w = plane_state->src_h >> 16; 11039 *src_h = plane_state->src_w >> 16; 11040 break; 11041 case DRM_MODE_ROTATE_0: 11042 case DRM_MODE_ROTATE_180: 11043 default: 11044 *src_w = plane_state->src_w >> 16; 11045 *src_h = plane_state->src_h >> 16; 11046 break; 11047 } 11048 } 11049 11050 static int dm_check_crtc_cursor(struct drm_atomic_state *state, 11051 struct drm_crtc *crtc, 11052 struct drm_crtc_state *new_crtc_state) 11053 { 11054 struct drm_plane *cursor = crtc->cursor, *underlying; 11055 struct drm_plane_state *new_cursor_state, *new_underlying_state; 11056 int i; 11057 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; 11058 int cursor_src_w, cursor_src_h; 11059 int underlying_src_w, underlying_src_h; 11060 11061 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a 11062 * cursor per pipe but it's going to inherit the scaling and 11063 * positioning from the underlying pipe. Check the cursor plane's 11064 * blending properties match the underlying planes'. 
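 * The check below computes each plane's effective scale as CRTC size versus
 * source size (in units of 1/1000) and rejects the commit when the cursor's
 * scale differs from any enabled non-cursor plane on this CRTC.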
*/ 11065 11066 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor); 11067 if (!new_cursor_state || !new_cursor_state->fb) { 11068 return 0; 11069 } 11070 11071 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h); 11072 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w; 11073 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h; 11074 11075 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { 11076 /* Narrow down to non-cursor planes on the same CRTC as the cursor */ 11077 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) 11078 continue; 11079 11080 /* Ignore disabled planes */ 11081 if (!new_underlying_state->fb) 11082 continue; 11083 11084 dm_get_oriented_plane_size(new_underlying_state, 11085 &underlying_src_w, &underlying_src_h); 11086 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w; 11087 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h; 11088 11089 if (cursor_scale_w != underlying_scale_w || 11090 cursor_scale_h != underlying_scale_h) { 11091 drm_dbg_atomic(crtc->dev, 11092 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", 11093 cursor->base.id, cursor->name, underlying->base.id, underlying->name); 11094 return -EINVAL; 11095 } 11096 11097 /* If this plane covers the whole CRTC, no need to check planes underneath */ 11098 if (new_underlying_state->crtc_x <= 0 && 11099 new_underlying_state->crtc_y <= 0 && 11100 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && 11101 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) 11102 break; 11103 } 11104 11105 return 0; 11106 } 11107 11108 #if defined(CONFIG_DRM_AMD_DC_DCN) 11109 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 11110 { 11111 struct drm_connector *connector; 11112 struct drm_connector_state *conn_state, *old_conn_state; 11113 struct amdgpu_dm_connector *aconnector = NULL; 11114 int i; 11115 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { 11116 if (!conn_state->crtc) 11117 conn_state = old_conn_state; 11118 11119 if (conn_state->crtc != crtc) 11120 continue; 11121 11122 aconnector = to_amdgpu_dm_connector(connector); 11123 if (!aconnector->port || !aconnector->mst_port) 11124 aconnector = NULL; 11125 else 11126 break; 11127 } 11128 11129 if (!aconnector) 11130 return 0; 11131 11132 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); 11133 } 11134 #endif 11135 11136 /** 11137 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 11138 * @dev: The DRM device 11139 * @state: The atomic state to commit 11140 * 11141 * Validate that the given atomic state is programmable by DC into hardware. 11142 * This involves constructing a &struct dc_state reflecting the new hardware 11143 * state we wish to commit, then querying DC to see if it is programmable. It's 11144 * important not to modify the existing DC state. Otherwise, atomic_check 11145 * may unexpectedly commit hardware changes. 11146 * 11147 * When validating the DC state, it's important that the right locks are 11148 * acquired. 
For the full update case, which removes/adds/updates streams on one 11149 * CRTC while flipping on another CRTC, acquiring the global lock will guarantee 11150 * that any such full update commit will wait for completion of any outstanding 11151 * flip using DRM's synchronization events. 11152 * 11153 * Note that DM adds the affected connectors for all CRTCs in the state, even when that 11154 * might not seem necessary. This is because DC stream creation requires the 11155 * DC sink, which is tied to the DRM connector state. Cleaning this up should 11156 * be possible but non-trivial - a possible TODO item. 11157 * 11158 * Return: 0 on success, or a negative error code if validation failed. 11159 */ 11160 static int amdgpu_dm_atomic_check(struct drm_device *dev, 11161 struct drm_atomic_state *state) 11162 { 11163 struct amdgpu_device *adev = drm_to_adev(dev); 11164 struct dm_atomic_state *dm_state = NULL; 11165 struct dc *dc = adev->dm.dc; 11166 struct drm_connector *connector; 11167 struct drm_connector_state *old_con_state, *new_con_state; 11168 struct drm_crtc *crtc; 11169 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11170 struct drm_plane *plane; 11171 struct drm_plane_state *old_plane_state, *new_plane_state; 11172 enum dc_status status; 11173 int ret, i; 11174 bool lock_and_validation_needed = false; 11175 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 11176 #if defined(CONFIG_DRM_AMD_DC_DCN) 11177 struct dsc_mst_fairness_vars vars[MAX_PIPES]; 11178 struct drm_dp_mst_topology_state *mst_state; 11179 struct drm_dp_mst_topology_mgr *mgr; 11180 #endif 11181 11182 trace_amdgpu_dm_atomic_check_begin(state); 11183 11184 ret = drm_atomic_helper_check_modeset(dev, state); 11185 if (ret) { 11186 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); 11187 goto fail; 11188 } 11189 11190 /* Check connector changes */ 11191 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11192 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11193 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11194 11195 /* Skip connectors that are disabled or part of modeset already.
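 * Only connectors that still have a CRTC in the new state are inspected
 * below; an ABM level change marks that CRTC's connectors as changed, so the
 * change is treated like a modeset on that CRTC.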
*/ 11196 if (!old_con_state->crtc && !new_con_state->crtc) 11197 continue; 11198 11199 if (!new_con_state->crtc) 11200 continue; 11201 11202 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 11203 if (IS_ERR(new_crtc_state)) { 11204 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); 11205 ret = PTR_ERR(new_crtc_state); 11206 goto fail; 11207 } 11208 11209 if (dm_old_con_state->abm_level != 11210 dm_new_con_state->abm_level) 11211 new_crtc_state->connectors_changed = true; 11212 } 11213 11214 #if defined(CONFIG_DRM_AMD_DC_DCN) 11215 if (dc_resource_is_dsc_encoding_supported(dc)) { 11216 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11217 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 11218 ret = add_affected_mst_dsc_crtcs(state, crtc); 11219 if (ret) { 11220 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); 11221 goto fail; 11222 } 11223 } 11224 } 11225 if (!pre_validate_dsc(state, &dm_state, vars)) { 11226 ret = -EINVAL; 11227 goto fail; 11228 } 11229 } 11230 #endif 11231 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11232 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 11233 11234 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 11235 !new_crtc_state->color_mgmt_changed && 11236 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 11237 dm_old_crtc_state->dsc_force_changed == false) 11238 continue; 11239 11240 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 11241 if (ret) { 11242 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); 11243 goto fail; 11244 } 11245 11246 if (!new_crtc_state->enable) 11247 continue; 11248 11249 ret = drm_atomic_add_affected_connectors(state, crtc); 11250 if (ret) { 11251 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); 11252 goto fail; 11253 } 11254 11255 ret = drm_atomic_add_affected_planes(state, crtc); 11256 if (ret) { 11257 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); 11258 goto fail; 11259 } 11260 11261 if (dm_old_crtc_state->dsc_force_changed) 11262 new_crtc_state->mode_changed = true; 11263 } 11264 11265 /* 11266 * Add all primary and overlay planes on the CRTC to the state 11267 * whenever a plane is enabled to maintain correct z-ordering 11268 * and to enable fast surface updates. 
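 * Cursor planes are skipped; any other plane attached to an affected CRTC
 * that is not yet part of the state is pulled in with
 * drm_atomic_get_plane_state() below.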
11269 */ 11270 drm_for_each_crtc(crtc, dev) { 11271 bool modified = false; 11272 11273 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 11274 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11275 continue; 11276 11277 if (new_plane_state->crtc == crtc || 11278 old_plane_state->crtc == crtc) { 11279 modified = true; 11280 break; 11281 } 11282 } 11283 11284 if (!modified) 11285 continue; 11286 11287 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 11288 if (plane->type == DRM_PLANE_TYPE_CURSOR) 11289 continue; 11290 11291 new_plane_state = 11292 drm_atomic_get_plane_state(state, plane); 11293 11294 if (IS_ERR(new_plane_state)) { 11295 ret = PTR_ERR(new_plane_state); 11296 DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); 11297 goto fail; 11298 } 11299 } 11300 } 11301 11302 /* Remove exiting planes if they are modified */ 11303 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 11304 ret = dm_update_plane_state(dc, state, plane, 11305 old_plane_state, 11306 new_plane_state, 11307 false, 11308 &lock_and_validation_needed); 11309 if (ret) { 11310 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); 11311 goto fail; 11312 } 11313 } 11314 11315 /* Disable all crtcs which require disable */ 11316 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11317 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11318 old_crtc_state, 11319 new_crtc_state, 11320 false, 11321 &lock_and_validation_needed); 11322 if (ret) { 11323 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); 11324 goto fail; 11325 } 11326 } 11327 11328 /* Enable all crtcs which require enable */ 11329 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11330 ret = dm_update_crtc_state(&adev->dm, state, crtc, 11331 old_crtc_state, 11332 new_crtc_state, 11333 true, 11334 &lock_and_validation_needed); 11335 if (ret) { 11336 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); 11337 goto fail; 11338 } 11339 } 11340 11341 /* Add new/modified planes */ 11342 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 11343 ret = dm_update_plane_state(dc, state, plane, 11344 old_plane_state, 11345 new_plane_state, 11346 true, 11347 &lock_and_validation_needed); 11348 if (ret) { 11349 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); 11350 goto fail; 11351 } 11352 } 11353 11354 /* Run this here since we want to validate the streams we created */ 11355 ret = drm_atomic_helper_check_planes(dev, state); 11356 if (ret) { 11357 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); 11358 goto fail; 11359 } 11360 11361 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11362 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 11363 if (dm_new_crtc_state->mpo_requested) 11364 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); 11365 } 11366 11367 /* Check cursor planes scaling */ 11368 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11369 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); 11370 if (ret) { 11371 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); 11372 goto fail; 11373 } 11374 } 11375 11376 if (state->legacy_cursor_update) { 11377 /* 11378 * This is a fast cursor update coming from the plane update 11379 * helper, check if it can be done asynchronously for better 11380 * performance. 
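 * drm_atomic_helper_async_check() only accepts simple single-plane updates
 * with no modeset, so anything more involved keeps going through the full
 * validation path below.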
11381 */ 11382 state->async_update = 11383 !drm_atomic_helper_async_check(dev, state); 11384 11385 /* 11386 * Skip the remaining global validation if this is an async 11387 * update. Cursor updates can be done without affecting 11388 * state or bandwidth calcs and this avoids the performance 11389 * penalty of locking the private state object and 11390 * allocating a new dc_state. 11391 */ 11392 if (state->async_update) 11393 return 0; 11394 } 11395 11396 /* Check scaling and underscan changes*/ 11397 /* TODO Removed scaling changes validation due to inability to commit 11398 * new stream into context w\o causing full reset. Need to 11399 * decide how to handle. 11400 */ 11401 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 11402 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 11403 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 11404 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 11405 11406 /* Skip any modesets/resets */ 11407 if (!acrtc || drm_atomic_crtc_needs_modeset( 11408 drm_atomic_get_new_crtc_state(state, &acrtc->base))) 11409 continue; 11410 11411 /* Skip any thing not scale or underscan changes */ 11412 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) 11413 continue; 11414 11415 lock_and_validation_needed = true; 11416 } 11417 11418 #if defined(CONFIG_DRM_AMD_DC_DCN) 11419 /* set the slot info for each mst_state based on the link encoding format */ 11420 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 11421 struct amdgpu_dm_connector *aconnector; 11422 struct drm_connector *connector; 11423 struct drm_connector_list_iter iter; 11424 u8 link_coding_cap; 11425 11426 if (!mgr->mst_state ) 11427 continue; 11428 11429 drm_connector_list_iter_begin(dev, &iter); 11430 drm_for_each_connector_iter(connector, &iter) { 11431 int id = connector->index; 11432 11433 if (id == mst_state->mgr->conn_base_id) { 11434 aconnector = to_amdgpu_dm_connector(connector); 11435 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 11436 drm_dp_mst_update_slots(mst_state, link_coding_cap); 11437 11438 break; 11439 } 11440 } 11441 drm_connector_list_iter_end(&iter); 11442 11443 } 11444 #endif 11445 /** 11446 * Streams and planes are reset when there are changes that affect 11447 * bandwidth. Anything that affects bandwidth needs to go through 11448 * DC global validation to ensure that the configuration can be applied 11449 * to hardware. 11450 * 11451 * We have to currently stall out here in atomic_check for outstanding 11452 * commits to finish in this case because our IRQ handlers reference 11453 * DRM state directly - we can end up disabling interrupts too early 11454 * if we don't. 11455 * 11456 * TODO: Remove this stall and drop DM state private objects. 
11457 */ 11458 if (lock_and_validation_needed) { 11459 ret = dm_atomic_get_state(state, &dm_state); 11460 if (ret) { 11461 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); 11462 goto fail; 11463 } 11464 11465 ret = do_aquire_global_lock(dev, state); 11466 if (ret) { 11467 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); 11468 goto fail; 11469 } 11470 11471 #if defined(CONFIG_DRM_AMD_DC_DCN) 11472 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { 11473 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); 11474 ret = -EINVAL; 11475 goto fail; 11476 } 11477 11478 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 11479 if (ret) { 11480 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); 11481 goto fail; 11482 } 11483 #endif 11484 11485 /* 11486 * Perform validation of MST topology in the state: 11487 * We need to perform MST atomic check before calling 11488 * dc_validate_global_state(), or there is a chance 11489 * to get stuck in an infinite loop and hang eventually. 11490 */ 11491 ret = drm_dp_mst_atomic_check(state); 11492 if (ret) { 11493 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); 11494 goto fail; 11495 } 11496 status = dc_validate_global_state(dc, dm_state->context, true); 11497 if (status != DC_OK) { 11498 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", 11499 dc_status_to_str(status), status); 11500 ret = -EINVAL; 11501 goto fail; 11502 } 11503 } else { 11504 /* 11505 * The commit is a fast update. Fast updates shouldn't change 11506 * the DC context, affect global validation, and can have their 11507 * commit work done in parallel with other commits not touching 11508 * the same resource. If we have a new DC context as part of 11509 * the DM atomic state from validation we need to free it and 11510 * retain the existing one instead. 11511 * 11512 * Furthermore, since the DM atomic state only contains the DC 11513 * context and can safely be annulled, we can free the state 11514 * and clear the associated private object now to free 11515 * some memory and avoid a possible use-after-free later. 11516 */ 11517 11518 for (i = 0; i < state->num_private_objs; i++) { 11519 struct drm_private_obj *obj = state->private_objs[i].ptr; 11520 11521 if (obj->funcs == adev->dm.atomic_obj.funcs) { 11522 int j = state->num_private_objs-1; 11523 11524 dm_atomic_destroy_state(obj, 11525 state->private_objs[i].state); 11526 11527 /* If i is not at the end of the array then the 11528 * last element needs to be moved to where i was 11529 * before the array can safely be truncated. 11530 */ 11531 if (i != j) 11532 state->private_objs[i] = 11533 state->private_objs[j]; 11534 11535 state->private_objs[j].ptr = NULL; 11536 state->private_objs[j].state = NULL; 11537 state->private_objs[j].old_state = NULL; 11538 state->private_objs[j].new_state = NULL; 11539 11540 state->num_private_objs = j; 11541 break; 11542 } 11543 } 11544 } 11545 11546 /* Store the overall update type for use later in atomic check. */ 11547 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { 11548 struct dm_crtc_state *dm_new_crtc_state = 11549 to_dm_crtc_state(new_crtc_state); 11550 11551 dm_new_crtc_state->update_type = lock_and_validation_needed ? 
11552 UPDATE_TYPE_FULL : 11553 UPDATE_TYPE_FAST; 11554 } 11555 11556 /* Must be success */ 11557 WARN_ON(ret); 11558 11559 trace_amdgpu_dm_atomic_check_finish(state, ret); 11560 11561 return ret; 11562 11563 fail: 11564 if (ret == -EDEADLK) 11565 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); 11566 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) 11567 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); 11568 else 11569 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret); 11570 11571 trace_amdgpu_dm_atomic_check_finish(state, ret); 11572 11573 return ret; 11574 } 11575 11576 static bool is_dp_capable_without_timing_msa(struct dc *dc, 11577 struct amdgpu_dm_connector *amdgpu_dm_connector) 11578 { 11579 uint8_t dpcd_data; 11580 bool capable = false; 11581 11582 if (amdgpu_dm_connector->dc_link && 11583 dm_helpers_dp_read_dpcd( 11584 NULL, 11585 amdgpu_dm_connector->dc_link, 11586 DP_DOWN_STREAM_PORT_COUNT, 11587 &dpcd_data, 11588 sizeof(dpcd_data))) { 11589 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false; 11590 } 11591 11592 return capable; 11593 } 11594 11595 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, 11596 unsigned int offset, 11597 unsigned int total_length, 11598 uint8_t *data, 11599 unsigned int length, 11600 struct amdgpu_hdmi_vsdb_info *vsdb) 11601 { 11602 bool res; 11603 union dmub_rb_cmd cmd; 11604 struct dmub_cmd_send_edid_cea *input; 11605 struct dmub_cmd_edid_cea_output *output; 11606 11607 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) 11608 return false; 11609 11610 memset(&cmd, 0, sizeof(cmd)); 11611 11612 input = &cmd.edid_cea.data.input; 11613 11614 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; 11615 cmd.edid_cea.header.sub_type = 0; 11616 cmd.edid_cea.header.payload_bytes = 11617 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); 11618 input->offset = offset; 11619 input->length = length; 11620 input->cea_total_length = total_length; 11621 memcpy(input->payload, data, length); 11622 11623 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); 11624 if (!res) { 11625 DRM_ERROR("EDID CEA parser failed\n"); 11626 return false; 11627 } 11628 11629 output = &cmd.edid_cea.data.output; 11630 11631 if (output->type == DMUB_CMD__EDID_CEA_ACK) { 11632 if (!output->ack.success) { 11633 DRM_ERROR("EDID CEA ack failed at offset %d\n", 11634 output->ack.offset); 11635 } 11636 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { 11637 if (!output->amd_vsdb.vsdb_found) 11638 return false; 11639 11640 vsdb->freesync_supported = output->amd_vsdb.freesync_supported; 11641 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; 11642 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; 11643 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; 11644 } else { 11645 DRM_WARN("Unknown EDID CEA parser results\n"); 11646 return false; 11647 } 11648 11649 return true; 11650 } 11651 11652 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, 11653 uint8_t *edid_ext, int len, 11654 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11655 { 11656 int i; 11657 11658 /* send extension block to DMCU for parsing */ 11659 for (i = 0; i < len; i += 8) { 11660 bool res; 11661 int offset; 11662 11663 /* send 8 bytes a time */ 11664 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) 11665 return false; 11666 11667 if (i+8 == len) { 11668 /* EDID block sent completed, expect result */ 11669 int version, min_rate, max_rate; 11670 11671 res = 
dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); 11672 if (res) { 11673 /* amd vsdb found */ 11674 vsdb_info->freesync_supported = 1; 11675 vsdb_info->amd_vsdb_version = version; 11676 vsdb_info->min_refresh_rate_hz = min_rate; 11677 vsdb_info->max_refresh_rate_hz = max_rate; 11678 return true; 11679 } 11680 /* not amd vsdb */ 11681 return false; 11682 } 11683 11684 /* check for ack*/ 11685 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); 11686 if (!res) 11687 return false; 11688 } 11689 11690 return false; 11691 } 11692 11693 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm, 11694 uint8_t *edid_ext, int len, 11695 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11696 { 11697 int i; 11698 11699 /* send extension block to DMCU for parsing */ 11700 for (i = 0; i < len; i += 8) { 11701 /* send 8 bytes a time */ 11702 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) 11703 return false; 11704 } 11705 11706 return vsdb_info->freesync_supported; 11707 } 11708 11709 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, 11710 uint8_t *edid_ext, int len, 11711 struct amdgpu_hdmi_vsdb_info *vsdb_info) 11712 { 11713 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 11714 11715 if (adev->dm.dmub_srv) 11716 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); 11717 else 11718 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); 11719 } 11720 11721 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, 11722 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) 11723 { 11724 uint8_t *edid_ext = NULL; 11725 int i; 11726 bool valid_vsdb_found = false; 11727 11728 /*----- drm_find_cea_extension() -----*/ 11729 /* No EDID or EDID extensions */ 11730 if (edid == NULL || edid->extensions == 0) 11731 return -ENODEV; 11732 11733 /* Find CEA extension */ 11734 for (i = 0; i < edid->extensions; i++) { 11735 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); 11736 if (edid_ext[0] == CEA_EXT) 11737 break; 11738 } 11739 11740 if (i == edid->extensions) 11741 return -ENODEV; 11742 11743 /*----- cea_db_offsets() -----*/ 11744 if (edid_ext[0] != CEA_EXT) 11745 return -ENODEV; 11746 11747 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); 11748 11749 return valid_vsdb_found ? i : -ENODEV; 11750 } 11751 11752 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, 11753 struct edid *edid) 11754 { 11755 int i = 0; 11756 struct detailed_timing *timing; 11757 struct detailed_non_pixel *data; 11758 struct detailed_data_monitor_range *range; 11759 struct amdgpu_dm_connector *amdgpu_dm_connector = 11760 to_amdgpu_dm_connector(connector); 11761 struct dm_connector_state *dm_con_state = NULL; 11762 struct dc_sink *sink; 11763 11764 struct drm_device *dev = connector->dev; 11765 struct amdgpu_device *adev = drm_to_adev(dev); 11766 bool freesync_capable = false; 11767 struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; 11768 11769 if (!connector->state) { 11770 DRM_ERROR("%s - Connector has no state", __func__); 11771 goto update; 11772 } 11773 11774 sink = amdgpu_dm_connector->dc_sink ? 
11775 amdgpu_dm_connector->dc_sink : 11776 amdgpu_dm_connector->dc_em_sink; 11777 11778 if (!edid || !sink) { 11779 dm_con_state = to_dm_connector_state(connector->state); 11780 11781 amdgpu_dm_connector->min_vfreq = 0; 11782 amdgpu_dm_connector->max_vfreq = 0; 11783 amdgpu_dm_connector->pixel_clock_mhz = 0; 11784 connector->display_info.monitor_range.min_vfreq = 0; 11785 connector->display_info.monitor_range.max_vfreq = 0; 11786 freesync_capable = false; 11787 11788 goto update; 11789 } 11790 11791 dm_con_state = to_dm_connector_state(connector->state); 11792 11793 if (!adev->dm.freesync_module) 11794 goto update; 11795 11796 11797 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT 11798 || sink->sink_signal == SIGNAL_TYPE_EDP) { 11799 bool edid_check_required = false; 11800 11801 if (edid) { 11802 edid_check_required = is_dp_capable_without_timing_msa( 11803 adev->dm.dc, 11804 amdgpu_dm_connector); 11805 } 11806 11807 if (edid_check_required == true && (edid->version > 1 || 11808 (edid->version == 1 && edid->revision > 1))) { 11809 for (i = 0; i < 4; i++) { 11810 11811 timing = &edid->detailed_timings[i]; 11812 data = &timing->data.other_data; 11813 range = &data->data.range; 11814 /* 11815 * Check if monitor has continuous frequency mode 11816 */ 11817 if (data->type != EDID_DETAIL_MONITOR_RANGE) 11818 continue; 11819 /* 11820 * Check for flag range limits only. If flag == 1 then 11821 * no additional timing information provided. 11822 * Default GTF, GTF Secondary curve and CVT are not 11823 * supported 11824 */ 11825 if (range->flags != 1) 11826 continue; 11827 11828 amdgpu_dm_connector->min_vfreq = range->min_vfreq; 11829 amdgpu_dm_connector->max_vfreq = range->max_vfreq; 11830 amdgpu_dm_connector->pixel_clock_mhz = 11831 range->pixel_clock_mhz * 10; 11832 11833 connector->display_info.monitor_range.min_vfreq = range->min_vfreq; 11834 connector->display_info.monitor_range.max_vfreq = range->max_vfreq; 11835 11836 break; 11837 } 11838 11839 if (amdgpu_dm_connector->max_vfreq - 11840 amdgpu_dm_connector->min_vfreq > 10) { 11841 11842 freesync_capable = true; 11843 } 11844 } 11845 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { 11846 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); 11847 if (i >= 0 && vsdb_info.freesync_supported) { 11848 timing = &edid->detailed_timings[i]; 11849 data = &timing->data.other_data; 11850 11851 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; 11852 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; 11853 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 11854 freesync_capable = true; 11855 11856 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; 11857 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; 11858 } 11859 } 11860 11861 update: 11862 if (dm_con_state) 11863 dm_con_state->freesync_capable = freesync_capable; 11864 11865 if (connector->vrr_capable_property) 11866 drm_connector_set_vrr_capable_property(connector, 11867 freesync_capable); 11868 } 11869 11870 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) 11871 { 11872 struct amdgpu_device *adev = drm_to_adev(dev); 11873 struct dc *dc = adev->dm.dc; 11874 int i; 11875 11876 mutex_lock(&adev->dm.dc_lock); 11877 if (dc->current_state) { 11878 for (i = 0; i < dc->current_state->stream_count; ++i) 11879 dc->current_state->streams[i] 11880 ->triggered_crtc_reset.enabled = 11881 adev->dm.force_timing_sync; 11882 11883 
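		/*
		 * With every stream's triggered_crtc_reset flag refreshed above,
		 * reprogram per-frame master sync and trigger a sync on the
		 * current DC state so the enabled CRTCs realign their timing.
		 */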
dm_enable_per_frame_crtc_master_sync(dc->current_state); 11884 dc_trigger_sync(dc, dc->current_state); 11885 } 11886 mutex_unlock(&adev->dm.dc_lock); 11887 } 11888 11889 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, 11890 uint32_t value, const char *func_name) 11891 { 11892 #ifdef DM_CHECK_ADDR_0 11893 if (address == 0) { 11894 DC_ERR("invalid register write. address = 0"); 11895 return; 11896 } 11897 #endif 11898 cgs_write_register(ctx->cgs_device, address, value); 11899 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); 11900 } 11901 11902 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, 11903 const char *func_name) 11904 { 11905 uint32_t value; 11906 #ifdef DM_CHECK_ADDR_0 11907 if (address == 0) { 11908 DC_ERR("invalid register read; address = 0\n"); 11909 return 0; 11910 } 11911 #endif 11912 11913 if (ctx->dmub_srv && 11914 ctx->dmub_srv->reg_helper_offload.gather_in_progress && 11915 !ctx->dmub_srv->reg_helper_offload.should_burst_write) { 11916 ASSERT(false); 11917 return 0; 11918 } 11919 11920 value = cgs_read_register(ctx->cgs_device, address); 11921 11922 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); 11923 11924 return value; 11925 } 11926 11927 static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, 11928 struct dc_context *ctx, 11929 uint8_t status_type, 11930 uint32_t *operation_result) 11931 { 11932 struct amdgpu_device *adev = ctx->driver_context; 11933 int return_status = -1; 11934 struct dmub_notification *p_notify = adev->dm.dmub_notify; 11935 11936 if (is_cmd_aux) { 11937 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { 11938 return_status = p_notify->aux_reply.length; 11939 *operation_result = p_notify->result; 11940 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) { 11941 *operation_result = AUX_RET_ERROR_TIMEOUT; 11942 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { 11943 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; 11944 } else { 11945 *operation_result = AUX_RET_ERROR_UNKNOWN; 11946 } 11947 } else { 11948 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) { 11949 return_status = 0; 11950 *operation_result = p_notify->sc_status; 11951 } else { 11952 *operation_result = SET_CONFIG_UNKNOWN_ERROR; 11953 } 11954 } 11955 11956 return return_status; 11957 } 11958 11959 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, 11960 unsigned int link_index, void *cmd_payload, void *operation_result) 11961 { 11962 struct amdgpu_device *adev = ctx->driver_context; 11963 int ret = 0; 11964 11965 if (is_cmd_aux) { 11966 dc_process_dmub_aux_transfer_async(ctx->dc, 11967 link_index, (struct aux_payload *)cmd_payload); 11968 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index, 11969 (struct set_config_cmd_payload *)cmd_payload, 11970 adev->dm.dmub_notify)) { 11971 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, 11972 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, 11973 (uint32_t *)operation_result); 11974 } 11975 11976 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ); 11977 if (ret == 0) { 11978 DRM_ERROR("wait_for_completion_timeout timeout!"); 11979 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, 11980 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT, 11981 (uint32_t *)operation_result); 11982 } 11983 11984 if (is_cmd_aux) { 11985 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { 11986 struct aux_payload *payload = (struct aux_payload *)cmd_payload; 11987 11988 
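			/*
			 * The DMUB notification carries the sink's native AUX reply;
			 * mirror the reply command into the caller's payload and, for
			 * ACKed reads, copy back the returned data.
			 */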
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; 11989 if (!payload->write && adev->dm.dmub_notify->aux_reply.length && 11990 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { 11991 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, 11992 adev->dm.dmub_notify->aux_reply.length); 11993 } 11994 } 11995 } 11996 11997 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, 11998 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, 11999 (uint32_t *)operation_result); 12000 } 12001 12002 /* 12003 * Check whether seamless boot is supported. 12004 * 12005 * So far we only support seamless boot on CHIP_VANGOGH. 12006 * If everything goes well, we may consider expanding 12007 * seamless boot to other ASICs. 12008 */ 12009 bool check_seamless_boot_capability(struct amdgpu_device *adev) 12010 { 12011 switch (adev->asic_type) { 12012 case CHIP_VANGOGH: 12013 if (!adev->mman.keep_stolen_vga_memory) 12014 return true; 12015 break; 12016 default: 12017 break; 12018 } 12019 12020 return false; 12021 } 12022
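/*
 * Illustrative sketch only, not part of this file: amdgpu_dm_atomic_check()
 * is reached from the DRM atomic ioctl through the driver's
 * &struct drm_mode_config_funcs, roughly as below (other callbacks omitted;
 * the real table is defined elsewhere in the DM code).
 *
 *	static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 *		.atomic_check = amdgpu_dm_atomic_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};
 */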