1 /* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26 /* The caprices of the preprocessor require that this be declared right here */ 27 #define CREATE_TRACE_POINTS 28 29 #include "dm_services_types.h" 30 #include "dc.h" 31 #include "dc_link_dp.h" 32 #include "dc/inc/core_types.h" 33 #include "dal_asic_id.h" 34 #include "dmub/dmub_srv.h" 35 #include "dc/inc/hw/dmcu.h" 36 #include "dc/inc/hw/abm.h" 37 #include "dc/dc_dmub_srv.h" 38 #include "dc/dc_edid_parser.h" 39 #include "dc/dc_stat.h" 40 #include "amdgpu_dm_trace.h" 41 42 #include "vid.h" 43 #include "amdgpu.h" 44 #include "amdgpu_display.h" 45 #include "amdgpu_ucode.h" 46 #include "atom.h" 47 #include "amdgpu_dm.h" 48 #ifdef CONFIG_DRM_AMD_DC_HDCP 49 #include "amdgpu_dm_hdcp.h" 50 #include <drm/drm_hdcp.h> 51 #endif 52 #include "amdgpu_pm.h" 53 54 #include "amd_shared.h" 55 #include "amdgpu_dm_irq.h" 56 #include "dm_helpers.h" 57 #include "amdgpu_dm_mst_types.h" 58 #if defined(CONFIG_DEBUG_FS) 59 #include "amdgpu_dm_debugfs.h" 60 #endif 61 #include "amdgpu_dm_psr.h" 62 63 #include "ivsrcid/ivsrcid_vislands30.h" 64 65 #include "i2caux_interface.h" 66 #include <linux/module.h> 67 #include <linux/moduleparam.h> 68 #include <linux/types.h> 69 #include <linux/pm_runtime.h> 70 #include <linux/pci.h> 71 #include <linux/firmware.h> 72 #include <linux/component.h> 73 74 #include <drm/drm_atomic.h> 75 #include <drm/drm_atomic_uapi.h> 76 #include <drm/drm_atomic_helper.h> 77 #include <drm/drm_dp_mst_helper.h> 78 #include <drm/drm_fb_helper.h> 79 #include <drm/drm_fourcc.h> 80 #include <drm/drm_edid.h> 81 #include <drm/drm_vblank.h> 82 #include <drm/drm_audio_component.h> 83 84 #if defined(CONFIG_DRM_AMD_DC_DCN) 85 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" 86 87 #include "dcn/dcn_1_0_offset.h" 88 #include "dcn/dcn_1_0_sh_mask.h" 89 #include "soc15_hw_ip.h" 90 #include "vega10_ip_offset.h" 91 92 #include "soc15_common.h" 93 #endif 94 95 #include "modules/inc/mod_freesync.h" 96 #include "modules/power/power_helpers.h" 97 #include "modules/inc/mod_info_packet.h" 98 99 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 100 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 101 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" 102 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); 103 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" 104 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); 105 #define 
FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" 106 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); 107 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" 108 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); 109 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" 110 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); 111 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" 112 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); 113 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" 114 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); 115 116 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 117 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 118 119 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" 120 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); 121 122 /* Number of bytes in PSP header for firmware. */ 123 #define PSP_HEADER_BYTES 0x100 124 125 /* Number of bytes in PSP footer for firmware. */ 126 #define PSP_FOOTER_BYTES 0x100 127 128 /** 129 * DOC: overview 130 * 131 * The AMDgpu display manager, **amdgpu_dm** (or even simpler, 132 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM 133 * requests into DC requests, and DC responses into DRM responses. 134 * 135 * The root control structure is &struct amdgpu_display_manager. 136 */ 137 138 /* basic init/fini API */ 139 static int amdgpu_dm_init(struct amdgpu_device *adev); 140 static void amdgpu_dm_fini(struct amdgpu_device *adev); 141 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); 142 143 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) 144 { 145 switch (link->dpcd_caps.dongle_type) { 146 case DISPLAY_DONGLE_NONE: 147 return DRM_MODE_SUBCONNECTOR_Native; 148 case DISPLAY_DONGLE_DP_VGA_CONVERTER: 149 return DRM_MODE_SUBCONNECTOR_VGA; 150 case DISPLAY_DONGLE_DP_DVI_CONVERTER: 151 case DISPLAY_DONGLE_DP_DVI_DONGLE: 152 return DRM_MODE_SUBCONNECTOR_DVID; 153 case DISPLAY_DONGLE_DP_HDMI_CONVERTER: 154 case DISPLAY_DONGLE_DP_HDMI_DONGLE: 155 return DRM_MODE_SUBCONNECTOR_HDMIA; 156 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: 157 default: 158 return DRM_MODE_SUBCONNECTOR_Unknown; 159 } 160 } 161 162 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector) 163 { 164 struct dc_link *link = aconnector->dc_link; 165 struct drm_connector *connector = &aconnector->base; 166 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; 167 168 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 169 return; 170 171 if (aconnector->dc_sink) 172 subconnector = get_subconnector_type(link); 173 174 drm_object_property_set_value(&connector->base, 175 connector->dev->mode_config.dp_subconnector_property, 176 subconnector); 177 } 178 179 /* 180 * initializes drm_device display related structures, based on the information 181 * provided by DAL. 
The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);

static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
				 struct drm_crtc_state *new_crtc_state);
/*
 * dm_vblank_get_counter
 *
 * @brief
 * Get counter for number of vertical blanks
 *
 * @param
 * struct amdgpu_device *adev - [in] desired amdgpu device
 * int disp_idx - [in] which CRTC to get the counter from
 *
 * @return
 * Counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
271 * for now parse it back into reg-format 272 */ 273 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, 274 &v_blank_start, 275 &v_blank_end, 276 &h_position, 277 &v_position); 278 279 *position = v_position | (h_position << 16); 280 *vbl = v_blank_start | (v_blank_end << 16); 281 } 282 283 return 0; 284 } 285 286 static bool dm_is_idle(void *handle) 287 { 288 /* XXX todo */ 289 return true; 290 } 291 292 static int dm_wait_for_idle(void *handle) 293 { 294 /* XXX todo */ 295 return 0; 296 } 297 298 static bool dm_check_soft_reset(void *handle) 299 { 300 return false; 301 } 302 303 static int dm_soft_reset(void *handle) 304 { 305 /* XXX todo */ 306 return 0; 307 } 308 309 static struct amdgpu_crtc * 310 get_crtc_by_otg_inst(struct amdgpu_device *adev, 311 int otg_inst) 312 { 313 struct drm_device *dev = adev_to_drm(adev); 314 struct drm_crtc *crtc; 315 struct amdgpu_crtc *amdgpu_crtc; 316 317 if (WARN_ON(otg_inst == -1)) 318 return adev->mode_info.crtcs[0]; 319 320 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 321 amdgpu_crtc = to_amdgpu_crtc(crtc); 322 323 if (amdgpu_crtc->otg_inst == otg_inst) 324 return amdgpu_crtc; 325 } 326 327 return NULL; 328 } 329 330 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc) 331 { 332 return acrtc->dm_irq_params.freesync_config.state == 333 VRR_STATE_ACTIVE_VARIABLE || 334 acrtc->dm_irq_params.freesync_config.state == 335 VRR_STATE_ACTIVE_FIXED; 336 } 337 338 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) 339 { 340 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || 341 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 342 } 343 344 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, 345 struct dm_crtc_state *new_state) 346 { 347 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) 348 return true; 349 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state)) 350 return true; 351 else 352 return false; 353 } 354 355 /** 356 * dm_pflip_high_irq() - Handle pageflip interrupt 357 * @interrupt_params: ignored 358 * 359 * Handles the pageflip interrupt by notifying all interested parties 360 * that the pageflip has been completed. 361 */ 362 static void dm_pflip_high_irq(void *interrupt_params) 363 { 364 struct amdgpu_crtc *amdgpu_crtc; 365 struct common_irq_params *irq_params = interrupt_params; 366 struct amdgpu_device *adev = irq_params->adev; 367 unsigned long flags; 368 struct drm_pending_vblank_event *e; 369 uint32_t vpos, hpos, v_blank_start, v_blank_end; 370 bool vrr_active; 371 372 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); 373 374 /* IRQ could occur when in initial stage */ 375 /* TODO work and BO cleanup */ 376 if (amdgpu_crtc == NULL) { 377 DC_LOG_PFLIP("CRTC is null, returning.\n"); 378 return; 379 } 380 381 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 382 383 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 384 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n", 385 amdgpu_crtc->pflip_status, 386 AMDGPU_FLIP_SUBMITTED, 387 amdgpu_crtc->crtc_id, 388 amdgpu_crtc); 389 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 390 return; 391 } 392 393 /* page flip completed. */ 394 e = amdgpu_crtc->event; 395 amdgpu_crtc->event = NULL; 396 397 WARN_ON(!e); 398 399 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc); 400 401 /* Fixed refresh rate, or VRR scanout position outside front-porch? 
*/ 402 if (!vrr_active || 403 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start, 404 &v_blank_end, &hpos, &vpos) || 405 (vpos < v_blank_start)) { 406 /* Update to correct count and vblank timestamp if racing with 407 * vblank irq. This also updates to the correct vblank timestamp 408 * even in VRR mode, as scanout is past the front-porch atm. 409 */ 410 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); 411 412 /* Wake up userspace by sending the pageflip event with proper 413 * count and timestamp of vblank of flip completion. 414 */ 415 if (e) { 416 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); 417 418 /* Event sent, so done with vblank for this flip */ 419 drm_crtc_vblank_put(&amdgpu_crtc->base); 420 } 421 } else if (e) { 422 /* VRR active and inside front-porch: vblank count and 423 * timestamp for pageflip event will only be up to date after 424 * drm_crtc_handle_vblank() has been executed from late vblank 425 * irq handler after start of back-porch (vline 0). We queue the 426 * pageflip event for send-out by drm_crtc_handle_vblank() with 427 * updated timestamp and count, once it runs after us. 428 * 429 * We need to open-code this instead of using the helper 430 * drm_crtc_arm_vblank_event(), as that helper would 431 * call drm_crtc_accurate_vblank_count(), which we must 432 * not call in VRR mode while we are in front-porch! 433 */ 434 435 /* sequence will be replaced by real count during send-out. */ 436 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); 437 e->pipe = amdgpu_crtc->crtc_id; 438 439 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); 440 e = NULL; 441 } 442 443 /* Keep track of vblank of this flip for flip throttling. We use the 444 * cooked hw counter, as that one incremented at start of this vblank 445 * of pageflip completion, so last_flip_vblank is the forbidden count 446 * for queueing new pageflips if vsync + VRR is enabled. 
447 */ 448 amdgpu_crtc->dm_irq_params.last_flip_vblank = 449 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); 450 451 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 452 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 453 454 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", 455 amdgpu_crtc->crtc_id, amdgpu_crtc, 456 vrr_active, (int) !e); 457 } 458 459 static void dm_vupdate_high_irq(void *interrupt_params) 460 { 461 struct common_irq_params *irq_params = interrupt_params; 462 struct amdgpu_device *adev = irq_params->adev; 463 struct amdgpu_crtc *acrtc; 464 struct drm_device *drm_dev; 465 struct drm_vblank_crtc *vblank; 466 ktime_t frame_duration_ns, previous_timestamp; 467 unsigned long flags; 468 int vrr_active; 469 470 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); 471 472 if (acrtc) { 473 vrr_active = amdgpu_dm_vrr_active_irq(acrtc); 474 drm_dev = acrtc->base.dev; 475 vblank = &drm_dev->vblank[acrtc->base.index]; 476 previous_timestamp = atomic64_read(&irq_params->previous_timestamp); 477 frame_duration_ns = vblank->time - previous_timestamp; 478 479 if (frame_duration_ns > 0) { 480 trace_amdgpu_refresh_rate_track(acrtc->base.index, 481 frame_duration_ns, 482 ktime_divns(NSEC_PER_SEC, frame_duration_ns)); 483 atomic64_set(&irq_params->previous_timestamp, vblank->time); 484 } 485 486 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", 487 acrtc->crtc_id, 488 vrr_active); 489 490 /* Core vblank handling is done here after end of front-porch in 491 * vrr mode, as vblank timestamping will give valid results 492 * while now done after front-porch. This will also deliver 493 * page-flip completion events that have been queued to us 494 * if a pageflip happened inside front-porch. 495 */ 496 if (vrr_active) { 497 drm_crtc_handle_vblank(&acrtc->base); 498 499 /* BTR processing for pre-DCE12 ASICs */ 500 if (acrtc->dm_irq_params.stream && 501 adev->family < AMDGPU_FAMILY_AI) { 502 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 503 mod_freesync_handle_v_update( 504 adev->dm.freesync_module, 505 acrtc->dm_irq_params.stream, 506 &acrtc->dm_irq_params.vrr_params); 507 508 dc_stream_adjust_vmin_vmax( 509 adev->dm.dc, 510 acrtc->dm_irq_params.stream, 511 &acrtc->dm_irq_params.vrr_params.adjust); 512 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 513 } 514 } 515 } 516 } 517 518 /** 519 * dm_crtc_high_irq() - Handles CRTC interrupt 520 * @interrupt_params: used for determining the CRTC instance 521 * 522 * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK 523 * event handler. 524 */ 525 static void dm_crtc_high_irq(void *interrupt_params) 526 { 527 struct common_irq_params *irq_params = interrupt_params; 528 struct amdgpu_device *adev = irq_params->adev; 529 struct amdgpu_crtc *acrtc; 530 unsigned long flags; 531 int vrr_active; 532 533 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); 534 if (!acrtc) 535 return; 536 537 vrr_active = amdgpu_dm_vrr_active_irq(acrtc); 538 539 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, 540 vrr_active, acrtc->dm_irq_params.active_planes); 541 542 /** 543 * Core vblank handling at start of front-porch is only possible 544 * in non-vrr mode, as only there vblank timestamping will give 545 * valid results while done in front-porch. Otherwise defer it 546 * to dm_vupdate_high_irq after end of front-porch. 
 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * The following must happen at the start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
 * DCN generation ASICs
 * @interrupt_params: interrupt parameters
 *
 * Used to set crc window/read out crc value at vertical line 0 position
 */
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);

	if (!acrtc)
		return;

	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif

/**
 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub AUX or SET_CONFIG command completion processing callback
 * Copies dmub notification to DM which is to be read by the AUX command
 * issuing thread and also signals the event to wake up the thread.
 */
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	if (adev->dm.dmub_notify)
		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
		complete(&adev->dm.dmub_aux_transfer_done);
}

/**
 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
 * @adev: amdgpu_device pointer
 * @notify: dmub notification structure
 *
 * Dmub Hpd interrupt processing callback. Gets the display index through the
 * link index and calls the helper to do the processing.
 */
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct dc_link *link;
	uint8_t link_index = 0;
	struct drm_device *dev;

	if (adev == NULL)
		return;

	if (notify == NULL) {
		DRM_ERROR("DMUB HPD callback notification was NULL");
		return;
	}

	if (notify->link_index >= adev->dm.dc->link_count) {
		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
		return;
	}

	dev = adev->dm.ddev;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);

	link_index = notify->link_index;

	link = adev->dm.dc->links[link_index];

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (link && aconnector->dc_link == link) {
			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
			handle_hpd_irq_helper(aconnector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

}

/**
 * register_dmub_notify_callback - Sets callback for DMUB notify
 * @adev: amdgpu_device pointer
 * @type: Type of dmub notification
 * @callback: Dmub interrupt callback function
 * @dmub_int_thread_offload: offload indicator
 *
 * API to register a dmub callback handler for a dmub notification.
 * Also sets an indicator of whether the callback processing is to be
 * offloaded to the dmub interrupt handling thread.
 * Return: true if the callback was registered, false if the callback is
 * NULL or the notification type is invalid
 */
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
					dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
		adev->dm.dmub_callback[type] = callback;
		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
	} else
		return false;

	return true;
}

static void dm_handle_hpd_work(struct work_struct *work)
{
	struct dmub_hpd_work *dmub_hpd_wrk;

	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);

	if (!dmub_hpd_wrk->dmub_notify) {
		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
		return;
	}

	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
										      dmub_hpd_wrk->dmub_notify);
	}
	kfree(dmub_hpd_wrk);

}

#define DMUB_TRACE_MAX_READ 64
/**
 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
 * @interrupt_params: used for determining the Outbox instance
 *
 * Handles the Outbox interrupt by dispatching DMUB notifications and
 * reading out the DMUB trace buffer.
 */
static void dm_dmub_outbox1_low_irq(void *interrupt_params)
{
	struct dmub_notification notify;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_display_manager *dm = &adev->dm;
	struct dmcub_trace_buf_entry entry = { 0 };
	uint32_t count = 0;
	struct dmub_hpd_work *dmub_hpd_wrk;

	if (dc_enable_dmub_notifications(adev->dm.dc)) {
		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
		if (!dmub_hpd_wrk) {
			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
			return;
		}
		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);

		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
			do {
				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
					DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
						  ARRAY_SIZE(dm->dmub_thread_offload));
					continue;
				}
				if (dm->dmub_thread_offload[notify.type] == true) {
					dmub_hpd_wrk->dmub_notify = &notify;
					dmub_hpd_wrk->adev = adev;
					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
				} else {
					dm->dmub_callback[notify.type](adev, &notify);
				}

			} while (notify.pending_notification);

		} else {
			DRM_ERROR("DM: Failed to receive correct outbox IRQ!");
		}
	}


	do {
		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
							 entry.param0, entry.param1);

			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
					 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
		} else
			break;

		count++;

	} while (count <= DMUB_TRACE_MAX_READ);

	ASSERT(count <= DMUB_TRACE_MAX_READ);
}
#endif

static int dm_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;


	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
						&compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
		}

	}

}

static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
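	/* Look up the connector driving this audio pin and copy out its ELD. */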
861 struct drm_device *dev = dev_get_drvdata(kdev); 862 struct amdgpu_device *adev = drm_to_adev(dev); 863 struct drm_connector *connector; 864 struct drm_connector_list_iter conn_iter; 865 struct amdgpu_dm_connector *aconnector; 866 int ret = 0; 867 868 *enabled = false; 869 870 mutex_lock(&adev->dm.audio_lock); 871 872 drm_connector_list_iter_begin(dev, &conn_iter); 873 drm_for_each_connector_iter(connector, &conn_iter) { 874 aconnector = to_amdgpu_dm_connector(connector); 875 if (aconnector->audio_inst != port) 876 continue; 877 878 *enabled = true; 879 ret = drm_eld_size(connector->eld); 880 memcpy(buf, connector->eld, min(max_bytes, ret)); 881 882 break; 883 } 884 drm_connector_list_iter_end(&conn_iter); 885 886 mutex_unlock(&adev->dm.audio_lock); 887 888 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); 889 890 return ret; 891 } 892 893 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { 894 .get_eld = amdgpu_dm_audio_component_get_eld, 895 }; 896 897 static int amdgpu_dm_audio_component_bind(struct device *kdev, 898 struct device *hda_kdev, void *data) 899 { 900 struct drm_device *dev = dev_get_drvdata(kdev); 901 struct amdgpu_device *adev = drm_to_adev(dev); 902 struct drm_audio_component *acomp = data; 903 904 acomp->ops = &amdgpu_dm_audio_component_ops; 905 acomp->dev = kdev; 906 adev->dm.audio_component = acomp; 907 908 return 0; 909 } 910 911 static void amdgpu_dm_audio_component_unbind(struct device *kdev, 912 struct device *hda_kdev, void *data) 913 { 914 struct drm_device *dev = dev_get_drvdata(kdev); 915 struct amdgpu_device *adev = drm_to_adev(dev); 916 struct drm_audio_component *acomp = data; 917 918 acomp->ops = NULL; 919 acomp->dev = NULL; 920 adev->dm.audio_component = NULL; 921 } 922 923 static const struct component_ops amdgpu_dm_audio_component_bind_ops = { 924 .bind = amdgpu_dm_audio_component_bind, 925 .unbind = amdgpu_dm_audio_component_unbind, 926 }; 927 928 static int amdgpu_dm_audio_init(struct amdgpu_device *adev) 929 { 930 int i, ret; 931 932 if (!amdgpu_audio) 933 return 0; 934 935 adev->mode_info.audio.enabled = true; 936 937 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; 938 939 for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 940 adev->mode_info.audio.pin[i].channels = -1; 941 adev->mode_info.audio.pin[i].rate = -1; 942 adev->mode_info.audio.pin[i].bits_per_sample = -1; 943 adev->mode_info.audio.pin[i].status_bits = 0; 944 adev->mode_info.audio.pin[i].category_code = 0; 945 adev->mode_info.audio.pin[i].connected = false; 946 adev->mode_info.audio.pin[i].id = 947 adev->dm.dc->res_pool->audios[i]->inst; 948 adev->mode_info.audio.pin[i].offset = 0; 949 } 950 951 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); 952 if (ret < 0) 953 return ret; 954 955 adev->dm.audio_registered = true; 956 957 return 0; 958 } 959 960 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) 961 { 962 if (!amdgpu_audio) 963 return; 964 965 if (!adev->mode_info.audio.enabled) 966 return; 967 968 if (adev->dm.audio_registered) { 969 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); 970 adev->dm.audio_registered = false; 971 } 972 973 /* TODO: Disable audio? 
*/ 974 975 adev->mode_info.audio.enabled = false; 976 } 977 978 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) 979 { 980 struct drm_audio_component *acomp = adev->dm.audio_component; 981 982 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { 983 DRM_DEBUG_KMS("Notify ELD: %d\n", pin); 984 985 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, 986 pin, -1); 987 } 988 } 989 990 static int dm_dmub_hw_init(struct amdgpu_device *adev) 991 { 992 const struct dmcub_firmware_header_v1_0 *hdr; 993 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 994 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 995 const struct firmware *dmub_fw = adev->dm.dmub_fw; 996 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; 997 struct abm *abm = adev->dm.dc->res_pool->abm; 998 struct dmub_srv_hw_params hw_params; 999 enum dmub_status status; 1000 const unsigned char *fw_inst_const, *fw_bss_data; 1001 uint32_t i, fw_inst_const_size, fw_bss_data_size; 1002 bool has_hw_support; 1003 1004 if (!dmub_srv) 1005 /* DMUB isn't supported on the ASIC. */ 1006 return 0; 1007 1008 if (!fb_info) { 1009 DRM_ERROR("No framebuffer info for DMUB service.\n"); 1010 return -EINVAL; 1011 } 1012 1013 if (!dmub_fw) { 1014 /* Firmware required for DMUB support. */ 1015 DRM_ERROR("No firmware provided for DMUB.\n"); 1016 return -EINVAL; 1017 } 1018 1019 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); 1020 if (status != DMUB_STATUS_OK) { 1021 DRM_ERROR("Error checking HW support for DMUB: %d\n", status); 1022 return -EINVAL; 1023 } 1024 1025 if (!has_hw_support) { 1026 DRM_INFO("DMUB unsupported on ASIC\n"); 1027 return 0; 1028 } 1029 1030 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; 1031 1032 fw_inst_const = dmub_fw->data + 1033 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1034 PSP_HEADER_BYTES; 1035 1036 fw_bss_data = dmub_fw->data + 1037 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1038 le32_to_cpu(hdr->inst_const_bytes); 1039 1040 /* Copy firmware and bios info into FB memory. */ 1041 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 1042 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 1043 1044 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 1045 1046 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, 1047 * amdgpu_ucode_init_single_fw will load dmub firmware 1048 * fw_inst_const part to cw0; otherwise, the firmware back door load 1049 * will be done by dm_dmub_hw_init 1050 */ 1051 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1052 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, 1053 fw_inst_const_size); 1054 } 1055 1056 if (fw_bss_data_size) 1057 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, 1058 fw_bss_data, fw_bss_data_size); 1059 1060 /* Copy firmware bios info into FB memory. */ 1061 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, 1062 adev->bios_size); 1063 1064 /* Reset regions that need to be reset. */ 1065 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, 1066 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); 1067 1068 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, 1069 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); 1070 1071 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, 1072 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); 1073 1074 /* Initialize hardware. 
*/ 1075 memset(&hw_params, 0, sizeof(hw_params)); 1076 hw_params.fb_base = adev->gmc.fb_start; 1077 hw_params.fb_offset = adev->gmc.aper_base; 1078 1079 /* backdoor load firmware and trigger dmub running */ 1080 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 1081 hw_params.load_inst_const = true; 1082 1083 if (dmcu) 1084 hw_params.psp_version = dmcu->psp_version; 1085 1086 for (i = 0; i < fb_info->num_fb; ++i) 1087 hw_params.fb[i] = &fb_info->fb[i]; 1088 1089 status = dmub_srv_hw_init(dmub_srv, &hw_params); 1090 if (status != DMUB_STATUS_OK) { 1091 DRM_ERROR("Error initializing DMUB HW: %d\n", status); 1092 return -EINVAL; 1093 } 1094 1095 /* Wait for firmware load to finish. */ 1096 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); 1097 if (status != DMUB_STATUS_OK) 1098 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); 1099 1100 /* Init DMCU and ABM if available. */ 1101 if (dmcu && abm) { 1102 dmcu->funcs->dmcu_init(dmcu); 1103 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); 1104 } 1105 1106 if (!adev->dm.dc->ctx->dmub_srv) 1107 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); 1108 if (!adev->dm.dc->ctx->dmub_srv) { 1109 DRM_ERROR("Couldn't allocate DC DMUB server!\n"); 1110 return -ENOMEM; 1111 } 1112 1113 DRM_INFO("DMUB hardware initialized: version=0x%08X\n", 1114 adev->dm.dmcub_fw_version); 1115 1116 return 0; 1117 } 1118 1119 #if defined(CONFIG_DRM_AMD_DC_DCN) 1120 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) 1121 { 1122 uint64_t pt_base; 1123 uint32_t logical_addr_low; 1124 uint32_t logical_addr_high; 1125 uint32_t agp_base, agp_bot, agp_top; 1126 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; 1127 1128 memset(pa_config, 0, sizeof(*pa_config)); 1129 1130 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; 1131 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); 1132 1133 if (adev->apu_flags & AMD_APU_IS_RAVEN2) 1134 /* 1135 * Raven2 has a HW issue that it is unable to use the vram which 1136 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the 1137 * workaround that increase system aperture high address (add 1) 1138 * to get rid of the VM fault and hardware hang. 
1139 */ 1140 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); 1141 else 1142 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; 1143 1144 agp_base = 0; 1145 agp_bot = adev->gmc.agp_start >> 24; 1146 agp_top = adev->gmc.agp_end >> 24; 1147 1148 1149 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF; 1150 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); 1151 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF; 1152 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12); 1153 page_table_base.high_part = upper_32_bits(pt_base) & 0xF; 1154 page_table_base.low_part = lower_32_bits(pt_base); 1155 1156 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; 1157 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; 1158 1159 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ; 1160 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; 1161 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; 1162 1163 pa_config->system_aperture.fb_base = adev->gmc.fb_start; 1164 pa_config->system_aperture.fb_offset = adev->gmc.aper_base; 1165 pa_config->system_aperture.fb_top = adev->gmc.fb_end; 1166 1167 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; 1168 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; 1169 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; 1170 1171 pa_config->is_hvm_enabled = 0; 1172 1173 } 1174 #endif 1175 #if defined(CONFIG_DRM_AMD_DC_DCN) 1176 static void vblank_control_worker(struct work_struct *work) 1177 { 1178 struct vblank_control_work *vblank_work = 1179 container_of(work, struct vblank_control_work, work); 1180 struct amdgpu_display_manager *dm = vblank_work->dm; 1181 1182 mutex_lock(&dm->dc_lock); 1183 1184 if (vblank_work->enable) 1185 dm->active_vblank_irq_count++; 1186 else if(dm->active_vblank_irq_count) 1187 dm->active_vblank_irq_count--; 1188 1189 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); 1190 1191 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); 1192 1193 /* Control PSR based on vblank requirements from OS */ 1194 if (vblank_work->stream && vblank_work->stream->link) { 1195 if (vblank_work->enable) { 1196 if (vblank_work->stream->link->psr_settings.psr_allow_active) 1197 amdgpu_dm_psr_disable(vblank_work->stream); 1198 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled && 1199 !vblank_work->stream->link->psr_settings.psr_allow_active && 1200 vblank_work->acrtc->dm_irq_params.allow_psr_entry) { 1201 amdgpu_dm_psr_enable(vblank_work->stream); 1202 } 1203 } 1204 1205 mutex_unlock(&dm->dc_lock); 1206 1207 dc_stream_release(vblank_work->stream); 1208 1209 kfree(vblank_work); 1210 } 1211 1212 #endif 1213 1214 static void dm_handle_hpd_rx_offload_work(struct work_struct *work) 1215 { 1216 struct hpd_rx_irq_offload_work *offload_work; 1217 struct amdgpu_dm_connector *aconnector; 1218 struct dc_link *dc_link; 1219 struct amdgpu_device *adev; 1220 enum dc_connection_type new_connection_type = dc_connection_none; 1221 unsigned long flags; 1222 1223 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); 1224 aconnector = offload_work->offload_wq->aconnector; 1225 1226 if (!aconnector) { 1227 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); 1228 goto skip; 1229 } 1230 1231 adev = drm_to_adev(aconnector->base.dev); 1232 
dc_link = aconnector->dc_link; 1233 1234 mutex_lock(&aconnector->hpd_lock); 1235 if (!dc_link_detect_sink(dc_link, &new_connection_type)) 1236 DRM_ERROR("KMS: Failed to detect connector\n"); 1237 mutex_unlock(&aconnector->hpd_lock); 1238 1239 if (new_connection_type == dc_connection_none) 1240 goto skip; 1241 1242 if (amdgpu_in_reset(adev)) 1243 goto skip; 1244 1245 mutex_lock(&adev->dm.dc_lock); 1246 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) 1247 dc_link_dp_handle_automated_test(dc_link); 1248 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && 1249 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && 1250 dc_link_dp_allow_hpd_rx_irq(dc_link)) { 1251 dc_link_dp_handle_link_loss(dc_link); 1252 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); 1253 offload_work->offload_wq->is_handling_link_loss = false; 1254 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); 1255 } 1256 mutex_unlock(&adev->dm.dc_lock); 1257 1258 skip: 1259 kfree(offload_work); 1260 1261 } 1262 1263 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) 1264 { 1265 int max_caps = dc->caps.max_links; 1266 int i = 0; 1267 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; 1268 1269 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); 1270 1271 if (!hpd_rx_offload_wq) 1272 return NULL; 1273 1274 1275 for (i = 0; i < max_caps; i++) { 1276 hpd_rx_offload_wq[i].wq = 1277 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); 1278 1279 if (hpd_rx_offload_wq[i].wq == NULL) { 1280 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); 1281 return NULL; 1282 } 1283 1284 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); 1285 } 1286 1287 return hpd_rx_offload_wq; 1288 } 1289 1290 static int amdgpu_dm_init(struct amdgpu_device *adev) 1291 { 1292 struct dc_init_data init_data; 1293 #ifdef CONFIG_DRM_AMD_DC_HDCP 1294 struct dc_callback_init init_params; 1295 #endif 1296 int r; 1297 1298 adev->dm.ddev = adev_to_drm(adev); 1299 adev->dm.adev = adev; 1300 1301 /* Zero all the fields */ 1302 memset(&init_data, 0, sizeof(init_data)); 1303 #ifdef CONFIG_DRM_AMD_DC_HDCP 1304 memset(&init_params, 0, sizeof(init_params)); 1305 #endif 1306 1307 mutex_init(&adev->dm.dc_lock); 1308 mutex_init(&adev->dm.audio_lock); 1309 #if defined(CONFIG_DRM_AMD_DC_DCN) 1310 spin_lock_init(&adev->dm.vblank_lock); 1311 #endif 1312 1313 if(amdgpu_dm_irq_init(adev)) { 1314 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); 1315 goto error; 1316 } 1317 1318 init_data.asic_id.chip_family = adev->family; 1319 1320 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1321 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1322 init_data.asic_id.chip_id = adev->pdev->device; 1323 1324 init_data.asic_id.vram_width = adev->gmc.vram_width; 1325 /* TODO: initialize init_data.asic_id.vram_type here!!!! 
*/ 1326 init_data.asic_id.atombios_base_address = 1327 adev->mode_info.atom_context->bios; 1328 1329 init_data.driver = adev; 1330 1331 adev->dm.cgs_device = amdgpu_cgs_create_device(adev); 1332 1333 if (!adev->dm.cgs_device) { 1334 DRM_ERROR("amdgpu: failed to create cgs device.\n"); 1335 goto error; 1336 } 1337 1338 init_data.cgs_device = adev->dm.cgs_device; 1339 1340 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; 1341 1342 switch (adev->asic_type) { 1343 case CHIP_CARRIZO: 1344 case CHIP_STONEY: 1345 case CHIP_RAVEN: 1346 case CHIP_RENOIR: 1347 init_data.flags.gpu_vm_support = true; 1348 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 1349 init_data.flags.disable_dmcu = true; 1350 break; 1351 case CHIP_VANGOGH: 1352 case CHIP_YELLOW_CARP: 1353 init_data.flags.gpu_vm_support = true; 1354 break; 1355 default: 1356 break; 1357 } 1358 1359 if (amdgpu_dc_feature_mask & DC_FBC_MASK) 1360 init_data.flags.fbc_support = true; 1361 1362 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) 1363 init_data.flags.multi_mon_pp_mclk_switch = true; 1364 1365 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) 1366 init_data.flags.disable_fractional_pwm = true; 1367 1368 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) 1369 init_data.flags.edp_no_power_sequencing = true; 1370 1371 init_data.flags.power_down_display_on_boot = true; 1372 1373 INIT_LIST_HEAD(&adev->dm.da_list); 1374 /* Display Core create. */ 1375 adev->dm.dc = dc_create(&init_data); 1376 1377 if (adev->dm.dc) { 1378 DRM_INFO("Display Core initialized with v%s!\n", DC_VER); 1379 } else { 1380 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); 1381 goto error; 1382 } 1383 1384 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { 1385 adev->dm.dc->debug.force_single_disp_pipe_split = false; 1386 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; 1387 } 1388 1389 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 1390 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? 
false : true; 1391 1392 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) 1393 adev->dm.dc->debug.disable_stutter = true; 1394 1395 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) 1396 adev->dm.dc->debug.disable_dsc = true; 1397 1398 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) 1399 adev->dm.dc->debug.disable_clock_gate = true; 1400 1401 r = dm_dmub_hw_init(adev); 1402 if (r) { 1403 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1404 goto error; 1405 } 1406 1407 dc_hardware_init(adev->dm.dc); 1408 1409 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); 1410 if (!adev->dm.hpd_rx_offload_wq) { 1411 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); 1412 goto error; 1413 } 1414 1415 #if defined(CONFIG_DRM_AMD_DC_DCN) 1416 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { 1417 struct dc_phy_addr_space_config pa_config; 1418 1419 mmhub_read_system_context(adev, &pa_config); 1420 1421 // Call the DC init_memory func 1422 dc_setup_system_context(adev->dm.dc, &pa_config); 1423 } 1424 #endif 1425 1426 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 1427 if (!adev->dm.freesync_module) { 1428 DRM_ERROR( 1429 "amdgpu: failed to initialize freesync_module.\n"); 1430 } else 1431 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", 1432 adev->dm.freesync_module); 1433 1434 amdgpu_dm_init_color_mod(); 1435 1436 #if defined(CONFIG_DRM_AMD_DC_DCN) 1437 if (adev->dm.dc->caps.max_links > 0) { 1438 adev->dm.vblank_control_workqueue = 1439 create_singlethread_workqueue("dm_vblank_control_workqueue"); 1440 if (!adev->dm.vblank_control_workqueue) 1441 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); 1442 } 1443 #endif 1444 1445 #ifdef CONFIG_DRM_AMD_DC_HDCP 1446 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) { 1447 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); 1448 1449 if (!adev->dm.hdcp_workqueue) 1450 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); 1451 else 1452 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); 1453 1454 dc_init_callbacks(adev->dm.dc, &init_params); 1455 } 1456 #endif 1457 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1458 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); 1459 #endif 1460 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1461 init_completion(&adev->dm.dmub_aux_transfer_done); 1462 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 1463 if (!adev->dm.dmub_notify) { 1464 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 1465 goto error; 1466 } 1467 1468 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 1469 if (!adev->dm.delayed_hpd_wq) { 1470 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 1471 goto error; 1472 } 1473 1474 amdgpu_dm_outbox_init(adev); 1475 #if defined(CONFIG_DRM_AMD_DC_DCN) 1476 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 1477 dmub_aux_setconfig_callback, false)) { 1478 DRM_ERROR("amdgpu: fail to register dmub aux callback"); 1479 goto error; 1480 } 1481 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { 1482 DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1483 goto error; 1484 } 1485 #endif 1486 } 1487 1488 if (amdgpu_dm_initialize_drm_device(adev)) { 1489 DRM_ERROR( 1490 "amdgpu: failed to initialize sw for display support.\n"); 1491 goto error; 1492 } 1493 1494 /* create 
fake encoders for MST */ 1495 dm_dp_create_fake_mst_encoders(adev); 1496 1497 /* TODO: Add_display_info? */ 1498 1499 /* TODO use dynamic cursor width */ 1500 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; 1501 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; 1502 1503 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { 1504 DRM_ERROR( 1505 "amdgpu: failed to initialize sw for display support.\n"); 1506 goto error; 1507 } 1508 1509 1510 DRM_DEBUG_DRIVER("KMS initialized.\n"); 1511 1512 return 0; 1513 error: 1514 amdgpu_dm_fini(adev); 1515 1516 return -EINVAL; 1517 } 1518 1519 static int amdgpu_dm_early_fini(void *handle) 1520 { 1521 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1522 1523 amdgpu_dm_audio_fini(adev); 1524 1525 return 0; 1526 } 1527 1528 static void amdgpu_dm_fini(struct amdgpu_device *adev) 1529 { 1530 int i; 1531 1532 #if defined(CONFIG_DRM_AMD_DC_DCN) 1533 if (adev->dm.vblank_control_workqueue) { 1534 destroy_workqueue(adev->dm.vblank_control_workqueue); 1535 adev->dm.vblank_control_workqueue = NULL; 1536 } 1537 #endif 1538 1539 for (i = 0; i < adev->dm.display_indexes_num; i++) { 1540 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); 1541 } 1542 1543 amdgpu_dm_destroy_drm_device(&adev->dm); 1544 1545 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1546 if (adev->dm.crc_rd_wrk) { 1547 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); 1548 kfree(adev->dm.crc_rd_wrk); 1549 adev->dm.crc_rd_wrk = NULL; 1550 } 1551 #endif 1552 #ifdef CONFIG_DRM_AMD_DC_HDCP 1553 if (adev->dm.hdcp_workqueue) { 1554 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); 1555 adev->dm.hdcp_workqueue = NULL; 1556 } 1557 1558 if (adev->dm.dc) 1559 dc_deinit_callbacks(adev->dm.dc); 1560 #endif 1561 1562 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 1563 1564 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1565 kfree(adev->dm.dmub_notify); 1566 adev->dm.dmub_notify = NULL; 1567 destroy_workqueue(adev->dm.delayed_hpd_wq); 1568 adev->dm.delayed_hpd_wq = NULL; 1569 } 1570 1571 if (adev->dm.dmub_bo) 1572 amdgpu_bo_free_kernel(&adev->dm.dmub_bo, 1573 &adev->dm.dmub_bo_gpu_addr, 1574 &adev->dm.dmub_bo_cpu_addr); 1575 1576 if (adev->dm.hpd_rx_offload_wq) { 1577 for (i = 0; i < adev->dm.dc->caps.max_links; i++) { 1578 if (adev->dm.hpd_rx_offload_wq[i].wq) { 1579 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); 1580 adev->dm.hpd_rx_offload_wq[i].wq = NULL; 1581 } 1582 } 1583 1584 kfree(adev->dm.hpd_rx_offload_wq); 1585 adev->dm.hpd_rx_offload_wq = NULL; 1586 } 1587 1588 /* DC Destroy TODO: Replace destroy DAL */ 1589 if (adev->dm.dc) 1590 dc_destroy(&adev->dm.dc); 1591 /* 1592 * TODO: pageflip, vlank interrupt 1593 * 1594 * amdgpu_dm_irq_fini(adev); 1595 */ 1596 1597 if (adev->dm.cgs_device) { 1598 amdgpu_cgs_destroy_device(adev->dm.cgs_device); 1599 adev->dm.cgs_device = NULL; 1600 } 1601 if (adev->dm.freesync_module) { 1602 mod_freesync_destroy(adev->dm.freesync_module); 1603 adev->dm.freesync_module = NULL; 1604 } 1605 1606 mutex_destroy(&adev->dm.audio_lock); 1607 mutex_destroy(&adev->dm.dc_lock); 1608 1609 return; 1610 } 1611 1612 static int load_dmcu_fw(struct amdgpu_device *adev) 1613 { 1614 const char *fw_name_dmcu = NULL; 1615 int r; 1616 const struct dmcu_firmware_header_v1_0 *hdr; 1617 1618 switch(adev->asic_type) { 1619 #if defined(CONFIG_DRM_AMD_DC_SI) 1620 case CHIP_TAHITI: 1621 case CHIP_PITCAIRN: 1622 case CHIP_VERDE: 1623 case CHIP_OLAND: 1624 #endif 1625 case CHIP_BONAIRE: 1626 case 
CHIP_HAWAII: 1627 case CHIP_KAVERI: 1628 case CHIP_KABINI: 1629 case CHIP_MULLINS: 1630 case CHIP_TONGA: 1631 case CHIP_FIJI: 1632 case CHIP_CARRIZO: 1633 case CHIP_STONEY: 1634 case CHIP_POLARIS11: 1635 case CHIP_POLARIS10: 1636 case CHIP_POLARIS12: 1637 case CHIP_VEGAM: 1638 case CHIP_VEGA10: 1639 case CHIP_VEGA12: 1640 case CHIP_VEGA20: 1641 case CHIP_NAVI10: 1642 case CHIP_NAVI14: 1643 case CHIP_RENOIR: 1644 case CHIP_SIENNA_CICHLID: 1645 case CHIP_NAVY_FLOUNDER: 1646 case CHIP_DIMGREY_CAVEFISH: 1647 case CHIP_BEIGE_GOBY: 1648 case CHIP_VANGOGH: 1649 case CHIP_YELLOW_CARP: 1650 return 0; 1651 case CHIP_NAVI12: 1652 fw_name_dmcu = FIRMWARE_NAVI12_DMCU; 1653 break; 1654 case CHIP_RAVEN: 1655 if (ASICREV_IS_PICASSO(adev->external_rev_id)) 1656 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1657 else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) 1658 fw_name_dmcu = FIRMWARE_RAVEN_DMCU; 1659 else 1660 return 0; 1661 break; 1662 default: 1663 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 1664 return -EINVAL; 1665 } 1666 1667 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 1668 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); 1669 return 0; 1670 } 1671 1672 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); 1673 if (r == -ENOENT) { 1674 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ 1675 DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); 1676 adev->dm.fw_dmcu = NULL; 1677 return 0; 1678 } 1679 if (r) { 1680 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", 1681 fw_name_dmcu); 1682 return r; 1683 } 1684 1685 r = amdgpu_ucode_validate(adev->dm.fw_dmcu); 1686 if (r) { 1687 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", 1688 fw_name_dmcu); 1689 release_firmware(adev->dm.fw_dmcu); 1690 adev->dm.fw_dmcu = NULL; 1691 return r; 1692 } 1693 1694 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; 1695 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; 1696 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; 1697 adev->firmware.fw_size += 1698 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 1699 1700 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; 1701 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; 1702 adev->firmware.fw_size += 1703 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); 1704 1705 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); 1706 1707 DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); 1708 1709 return 0; 1710 } 1711 1712 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) 1713 { 1714 struct amdgpu_device *adev = ctx; 1715 1716 return dm_read_reg(adev->dm.dc->ctx, address); 1717 } 1718 1719 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, 1720 uint32_t value) 1721 { 1722 struct amdgpu_device *adev = ctx; 1723 1724 return dm_write_reg(adev->dm.dc->ctx, address, value); 1725 } 1726 1727 static int dm_dmub_sw_init(struct amdgpu_device *adev) 1728 { 1729 struct dmub_srv_create_params create_params; 1730 struct dmub_srv_region_params region_params; 1731 struct dmub_srv_region_info region_info; 1732 struct dmub_srv_fb_params fb_params; 1733 struct dmub_srv_fb_info *fb_info; 1734 struct dmub_srv *dmub_srv; 1735 const struct dmcub_firmware_header_v1_0 *hdr; 1736 const char *fw_name_dmub; 1737 enum dmub_asic dmub_asic; 1738 enum dmub_status status; 
1739 int r; 1740 1741 switch (adev->asic_type) { 1742 case CHIP_RENOIR: 1743 dmub_asic = DMUB_ASIC_DCN21; 1744 fw_name_dmub = FIRMWARE_RENOIR_DMUB; 1745 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) 1746 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; 1747 break; 1748 case CHIP_SIENNA_CICHLID: 1749 dmub_asic = DMUB_ASIC_DCN30; 1750 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; 1751 break; 1752 case CHIP_NAVY_FLOUNDER: 1753 dmub_asic = DMUB_ASIC_DCN30; 1754 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; 1755 break; 1756 case CHIP_VANGOGH: 1757 dmub_asic = DMUB_ASIC_DCN301; 1758 fw_name_dmub = FIRMWARE_VANGOGH_DMUB; 1759 break; 1760 case CHIP_DIMGREY_CAVEFISH: 1761 dmub_asic = DMUB_ASIC_DCN302; 1762 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; 1763 break; 1764 case CHIP_BEIGE_GOBY: 1765 dmub_asic = DMUB_ASIC_DCN303; 1766 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; 1767 break; 1768 case CHIP_YELLOW_CARP: 1769 dmub_asic = DMUB_ASIC_DCN31; 1770 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; 1771 break; 1772 1773 default: 1774 /* ASIC doesn't support DMUB. */ 1775 return 0; 1776 } 1777 1778 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); 1779 if (r) { 1780 DRM_ERROR("DMUB firmware loading failed: %d\n", r); 1781 return 0; 1782 } 1783 1784 r = amdgpu_ucode_validate(adev->dm.dmub_fw); 1785 if (r) { 1786 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); 1787 return 0; 1788 } 1789 1790 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; 1791 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); 1792 1793 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 1794 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = 1795 AMDGPU_UCODE_ID_DMCUB; 1796 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = 1797 adev->dm.dmub_fw; 1798 adev->firmware.fw_size += 1799 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); 1800 1801 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 1802 adev->dm.dmcub_fw_version); 1803 } 1804 1805 1806 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 1807 dmub_srv = adev->dm.dmub_srv; 1808 1809 if (!dmub_srv) { 1810 DRM_ERROR("Failed to allocate DMUB service!\n"); 1811 return -ENOMEM; 1812 } 1813 1814 memset(&create_params, 0, sizeof(create_params)); 1815 create_params.user_ctx = adev; 1816 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 1817 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 1818 create_params.asic = dmub_asic; 1819 1820 /* Create the DMUB service. */ 1821 status = dmub_srv_create(dmub_srv, &create_params); 1822 if (status != DMUB_STATUS_OK) { 1823 DRM_ERROR("Error creating DMUB service: %d\n", status); 1824 return -EINVAL; 1825 } 1826 1827 /* Calculate the size of all the regions for the DMUB service. */ 1828 memset(®ion_params, 0, sizeof(region_params)); 1829 1830 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 1831 PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 1832 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 1833 region_params.vbios_size = adev->bios_size; 1834 region_params.fw_bss_data = region_params.bss_data_size ? 
1835 adev->dm.dmub_fw->data + 1836 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1837 le32_to_cpu(hdr->inst_const_bytes) : NULL; 1838 region_params.fw_inst_const = 1839 adev->dm.dmub_fw->data + 1840 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1841 PSP_HEADER_BYTES; 1842 1843 status = dmub_srv_calc_region_info(dmub_srv, &region_params, 1844 &region_info); 1845 1846 if (status != DMUB_STATUS_OK) { 1847 DRM_ERROR("Error calculating DMUB region info: %d\n", status); 1848 return -EINVAL; 1849 } 1850 1851 /* 1852 * Allocate a framebuffer based on the total size of all the regions. 1853 * TODO: Move this into GART. 1854 */ 1855 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 1856 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, 1857 &adev->dm.dmub_bo_gpu_addr, 1858 &adev->dm.dmub_bo_cpu_addr); 1859 if (r) 1860 return r; 1861 1862 /* Rebase the regions on the framebuffer address. */ 1863 memset(&fb_params, 0, sizeof(fb_params)); 1864 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; 1865 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; 1866 fb_params.region_info = &region_info; 1867 1868 adev->dm.dmub_fb_info = 1869 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 1870 fb_info = adev->dm.dmub_fb_info; 1871 1872 if (!fb_info) { 1873 DRM_ERROR( 1874 "Failed to allocate framebuffer info for DMUB service!\n"); 1875 return -ENOMEM; 1876 } 1877 1878 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); 1879 if (status != DMUB_STATUS_OK) { 1880 DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 1881 return -EINVAL; 1882 } 1883 1884 return 0; 1885 } 1886 1887 static int dm_sw_init(void *handle) 1888 { 1889 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1890 int r; 1891 1892 r = dm_dmub_sw_init(adev); 1893 if (r) 1894 return r; 1895 1896 return load_dmcu_fw(adev); 1897 } 1898 1899 static int dm_sw_fini(void *handle) 1900 { 1901 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1902 1903 kfree(adev->dm.dmub_fb_info); 1904 adev->dm.dmub_fb_info = NULL; 1905 1906 if (adev->dm.dmub_srv) { 1907 dmub_srv_destroy(adev->dm.dmub_srv); 1908 adev->dm.dmub_srv = NULL; 1909 } 1910 1911 release_firmware(adev->dm.dmub_fw); 1912 adev->dm.dmub_fw = NULL; 1913 1914 release_firmware(adev->dm.fw_dmcu); 1915 adev->dm.fw_dmcu = NULL; 1916 1917 return 0; 1918 } 1919 1920 static int detect_mst_link_for_all_connectors(struct drm_device *dev) 1921 { 1922 struct amdgpu_dm_connector *aconnector; 1923 struct drm_connector *connector; 1924 struct drm_connector_list_iter iter; 1925 int ret = 0; 1926 1927 drm_connector_list_iter_begin(dev, &iter); 1928 drm_for_each_connector_iter(connector, &iter) { 1929 aconnector = to_amdgpu_dm_connector(connector); 1930 if (aconnector->dc_link->type == dc_connection_mst_branch && 1931 aconnector->mst_mgr.aux) { 1932 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", 1933 aconnector, 1934 aconnector->base.base.id); 1935 1936 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); 1937 if (ret < 0) { 1938 DRM_ERROR("DM_MST: Failed to start MST\n"); 1939 aconnector->dc_link->type = 1940 dc_connection_single; 1941 break; 1942 } 1943 } 1944 } 1945 drm_connector_list_iter_end(&iter); 1946 1947 return ret; 1948 } 1949 1950 static int dm_late_init(void *handle) 1951 { 1952 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1953 1954 struct dmcu_iram_parameters params; 1955 unsigned int linear_lut[16]; 1956 int i; 1957 struct dmcu *dmcu = NULL; 1958 1959 dmcu = adev->dm.dc->res_pool->dmcu; 1960 1961 for (i = 0; i
< 16; i++) 1962 linear_lut[i] = 0xFFFF * i / 15; 1963 1964 params.set = 0; 1965 params.backlight_ramping_override = false; 1966 params.backlight_ramping_start = 0xCCCC; 1967 params.backlight_ramping_reduction = 0xCCCCCCCC; 1968 params.backlight_lut_array_size = 16; 1969 params.backlight_lut_array = linear_lut; 1970 1971 /* Min backlight level after ABM reduction, Don't allow below 1% 1972 * 0xFFFF x 0.01 = 0x28F 1973 */ 1974 params.min_abm_backlight = 0x28F; 1975 /* In the case where abm is implemented on dmcub, 1976 * dmcu object will be null. 1977 * ABM 2.4 and up are implemented on dmcub. 1978 */ 1979 if (dmcu) { 1980 if (!dmcu_load_iram(dmcu, params)) 1981 return -EINVAL; 1982 } else if (adev->dm.dc->ctx->dmub_srv) { 1983 struct dc_link *edp_links[MAX_NUM_EDP]; 1984 int edp_num; 1985 1986 get_edp_links(adev->dm.dc, edp_links, &edp_num); 1987 for (i = 0; i < edp_num; i++) { 1988 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) 1989 return -EINVAL; 1990 } 1991 } 1992 1993 return detect_mst_link_for_all_connectors(adev_to_drm(adev)); 1994 } 1995 1996 static void s3_handle_mst(struct drm_device *dev, bool suspend) 1997 { 1998 struct amdgpu_dm_connector *aconnector; 1999 struct drm_connector *connector; 2000 struct drm_connector_list_iter iter; 2001 struct drm_dp_mst_topology_mgr *mgr; 2002 int ret; 2003 bool need_hotplug = false; 2004 2005 drm_connector_list_iter_begin(dev, &iter); 2006 drm_for_each_connector_iter(connector, &iter) { 2007 aconnector = to_amdgpu_dm_connector(connector); 2008 if (aconnector->dc_link->type != dc_connection_mst_branch || 2009 aconnector->mst_port) 2010 continue; 2011 2012 mgr = &aconnector->mst_mgr; 2013 2014 if (suspend) { 2015 drm_dp_mst_topology_mgr_suspend(mgr); 2016 } else { 2017 ret = drm_dp_mst_topology_mgr_resume(mgr, true); 2018 if (ret < 0) { 2019 drm_dp_mst_topology_mgr_set_mst(mgr, false); 2020 need_hotplug = true; 2021 } 2022 } 2023 } 2024 drm_connector_list_iter_end(&iter); 2025 2026 if (need_hotplug) 2027 drm_kms_helper_hotplug_event(dev); 2028 } 2029 2030 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) 2031 { 2032 struct smu_context *smu = &adev->smu; 2033 int ret = 0; 2034 2035 if (!is_support_sw_smu(adev)) 2036 return 0; 2037 2038 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends 2039 * on window driver dc implementation. 2040 * For Navi1x, clock settings of dcn watermarks are fixed. the settings 2041 * should be passed to smu during boot up and resume from s3. 2042 * boot up: dc calculate dcn watermark clock settings within dc_create, 2043 * dcn20_resource_construct 2044 * then call pplib functions below to pass the settings to smu: 2045 * smu_set_watermarks_for_clock_ranges 2046 * smu_set_watermarks_table 2047 * navi10_set_watermarks_table 2048 * smu_write_watermarks_table 2049 * 2050 * For Renoir, clock settings of dcn watermark are also fixed values. 
2051 * dc has implemented different flow for window driver: 2052 * dc_hardware_init / dc_set_power_state 2053 * dcn10_init_hw 2054 * notify_wm_ranges 2055 * set_wm_ranges 2056 * -- Linux 2057 * smu_set_watermarks_for_clock_ranges 2058 * renoir_set_watermarks_table 2059 * smu_write_watermarks_table 2060 * 2061 * For Linux, 2062 * dc_hardware_init -> amdgpu_dm_init 2063 * dc_set_power_state --> dm_resume 2064 * 2065 * therefore, this function apply to navi10/12/14 but not Renoir 2066 * * 2067 */ 2068 switch(adev->asic_type) { 2069 case CHIP_NAVI10: 2070 case CHIP_NAVI14: 2071 case CHIP_NAVI12: 2072 break; 2073 default: 2074 return 0; 2075 } 2076 2077 ret = smu_write_watermarks_table(smu); 2078 if (ret) { 2079 DRM_ERROR("Failed to update WMTABLE!\n"); 2080 return ret; 2081 } 2082 2083 return 0; 2084 } 2085 2086 /** 2087 * dm_hw_init() - Initialize DC device 2088 * @handle: The base driver device containing the amdgpu_dm device. 2089 * 2090 * Initialize the &struct amdgpu_display_manager device. This involves calling 2091 * the initializers of each DM component, then populating the struct with them. 2092 * 2093 * Although the function implies hardware initialization, both hardware and 2094 * software are initialized here. Splitting them out to their relevant init 2095 * hooks is a future TODO item. 2096 * 2097 * Some notable things that are initialized here: 2098 * 2099 * - Display Core, both software and hardware 2100 * - DC modules that we need (freesync and color management) 2101 * - DRM software states 2102 * - Interrupt sources and handlers 2103 * - Vblank support 2104 * - Debug FS entries, if enabled 2105 */ 2106 static int dm_hw_init(void *handle) 2107 { 2108 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2109 /* Create DAL display manager */ 2110 amdgpu_dm_init(adev); 2111 amdgpu_dm_hpd_init(adev); 2112 2113 return 0; 2114 } 2115 2116 /** 2117 * dm_hw_fini() - Teardown DC device 2118 * @handle: The base driver device containing the amdgpu_dm device. 2119 * 2120 * Teardown components within &struct amdgpu_display_manager that require 2121 * cleanup. This involves cleaning up the DRM device, DC, and any modules that 2122 * were loaded. Also flush IRQ workqueues and disable them. 2123 */ 2124 static int dm_hw_fini(void *handle) 2125 { 2126 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2127 2128 amdgpu_dm_hpd_fini(adev); 2129 2130 amdgpu_dm_irq_fini(adev); 2131 amdgpu_dm_fini(adev); 2132 return 0; 2133 } 2134 2135 2136 static int dm_enable_vblank(struct drm_crtc *crtc); 2137 static void dm_disable_vblank(struct drm_crtc *crtc); 2138 2139 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, 2140 struct dc_state *state, bool enable) 2141 { 2142 enum dc_irq_source irq_source; 2143 struct amdgpu_crtc *acrtc; 2144 int rc = -EBUSY; 2145 int i = 0; 2146 2147 for (i = 0; i < state->stream_count; i++) { 2148 acrtc = get_crtc_by_otg_inst( 2149 adev, state->stream_status[i].primary_otg_inst); 2150 2151 if (acrtc && state->stream_status[i].plane_count != 0) { 2152 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; 2153 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 2154 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", 2155 acrtc->crtc_id, enable ? "en" : "dis", rc); 2156 if (rc) 2157 DRM_WARN("Failed to %s pflip interrupts\n", 2158 enable ? 
"enable" : "disable"); 2159 2160 if (enable) { 2161 rc = dm_enable_vblank(&acrtc->base); 2162 if (rc) 2163 DRM_WARN("Failed to enable vblank interrupts\n"); 2164 } else { 2165 dm_disable_vblank(&acrtc->base); 2166 } 2167 2168 } 2169 } 2170 2171 } 2172 2173 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) 2174 { 2175 struct dc_state *context = NULL; 2176 enum dc_status res = DC_ERROR_UNEXPECTED; 2177 int i; 2178 struct dc_stream_state *del_streams[MAX_PIPES]; 2179 int del_streams_count = 0; 2180 2181 memset(del_streams, 0, sizeof(del_streams)); 2182 2183 context = dc_create_state(dc); 2184 if (context == NULL) 2185 goto context_alloc_fail; 2186 2187 dc_resource_state_copy_construct_current(dc, context); 2188 2189 /* First remove from context all streams */ 2190 for (i = 0; i < context->stream_count; i++) { 2191 struct dc_stream_state *stream = context->streams[i]; 2192 2193 del_streams[del_streams_count++] = stream; 2194 } 2195 2196 /* Remove all planes for removed streams and then remove the streams */ 2197 for (i = 0; i < del_streams_count; i++) { 2198 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2199 res = DC_FAIL_DETACH_SURFACES; 2200 goto fail; 2201 } 2202 2203 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2204 if (res != DC_OK) 2205 goto fail; 2206 } 2207 2208 2209 res = dc_validate_global_state(dc, context, false); 2210 2211 if (res != DC_OK) { 2212 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res); 2213 goto fail; 2214 } 2215 2216 res = dc_commit_state(dc, context); 2217 2218 fail: 2219 dc_release_state(context); 2220 2221 context_alloc_fail: 2222 return res; 2223 } 2224 2225 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) 2226 { 2227 int i; 2228 2229 if (dm->hpd_rx_offload_wq) { 2230 for (i = 0; i < dm->dc->caps.max_links; i++) 2231 flush_workqueue(dm->hpd_rx_offload_wq[i].wq); 2232 } 2233 } 2234 2235 static int dm_suspend(void *handle) 2236 { 2237 struct amdgpu_device *adev = handle; 2238 struct amdgpu_display_manager *dm = &adev->dm; 2239 int ret = 0; 2240 2241 if (amdgpu_in_reset(adev)) { 2242 mutex_lock(&dm->dc_lock); 2243 2244 #if defined(CONFIG_DRM_AMD_DC_DCN) 2245 dc_allow_idle_optimizations(adev->dm.dc, false); 2246 #endif 2247 2248 dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2249 2250 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2251 2252 amdgpu_dm_commit_zero_streams(dm->dc); 2253 2254 amdgpu_dm_irq_suspend(adev); 2255 2256 hpd_rx_irq_work_suspend(dm); 2257 2258 return ret; 2259 } 2260 2261 WARN_ON(adev->dm.cached_state); 2262 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); 2263 2264 s3_handle_mst(adev_to_drm(adev), true); 2265 2266 amdgpu_dm_irq_suspend(adev); 2267 2268 hpd_rx_irq_work_suspend(dm); 2269 2270 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); 2271 2272 return 0; 2273 } 2274 2275 static struct amdgpu_dm_connector * 2276 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, 2277 struct drm_crtc *crtc) 2278 { 2279 uint32_t i; 2280 struct drm_connector_state *new_con_state; 2281 struct drm_connector *connector; 2282 struct drm_crtc *crtc_from_state; 2283 2284 for_each_new_connector_in_state(state, connector, new_con_state, i) { 2285 crtc_from_state = new_con_state->crtc; 2286 2287 if (crtc_from_state == crtc) 2288 return to_amdgpu_dm_connector(connector); 2289 } 2290 2291 return NULL; 2292 } 2293 2294 static void emulated_link_detect(struct dc_link *link) 2295 { 2296 struct 
dc_sink_init_data sink_init_data = { 0 }; 2297 struct display_sink_capability sink_caps = { 0 }; 2298 enum dc_edid_status edid_status; 2299 struct dc_context *dc_ctx = link->ctx; 2300 struct dc_sink *sink = NULL; 2301 struct dc_sink *prev_sink = NULL; 2302 2303 link->type = dc_connection_none; 2304 prev_sink = link->local_sink; 2305 2306 if (prev_sink) 2307 dc_sink_release(prev_sink); 2308 2309 switch (link->connector_signal) { 2310 case SIGNAL_TYPE_HDMI_TYPE_A: { 2311 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2312 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; 2313 break; 2314 } 2315 2316 case SIGNAL_TYPE_DVI_SINGLE_LINK: { 2317 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2318 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; 2319 break; 2320 } 2321 2322 case SIGNAL_TYPE_DVI_DUAL_LINK: { 2323 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2324 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; 2325 break; 2326 } 2327 2328 case SIGNAL_TYPE_LVDS: { 2329 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; 2330 sink_caps.signal = SIGNAL_TYPE_LVDS; 2331 break; 2332 } 2333 2334 case SIGNAL_TYPE_EDP: { 2335 sink_caps.transaction_type = 2336 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2337 sink_caps.signal = SIGNAL_TYPE_EDP; 2338 break; 2339 } 2340 2341 case SIGNAL_TYPE_DISPLAY_PORT: { 2342 sink_caps.transaction_type = 2343 DDC_TRANSACTION_TYPE_I2C_OVER_AUX; 2344 sink_caps.signal = SIGNAL_TYPE_VIRTUAL; 2345 break; 2346 } 2347 2348 default: 2349 DC_ERROR("Invalid connector type! signal:%d\n", 2350 link->connector_signal); 2351 return; 2352 } 2353 2354 sink_init_data.link = link; 2355 sink_init_data.sink_signal = sink_caps.signal; 2356 2357 sink = dc_sink_create(&sink_init_data); 2358 if (!sink) { 2359 DC_ERROR("Failed to create sink!\n"); 2360 return; 2361 } 2362 2363 /* dc_sink_create returns a new reference */ 2364 link->local_sink = sink; 2365 2366 edid_status = dm_helpers_read_local_edid( 2367 link->ctx, 2368 link, 2369 sink); 2370 2371 if (edid_status != EDID_OK) 2372 DC_ERROR("Failed to read EDID"); 2373 2374 } 2375 2376 static void dm_gpureset_commit_state(struct dc_state *dc_state, 2377 struct amdgpu_display_manager *dm) 2378 { 2379 struct { 2380 struct dc_surface_update surface_updates[MAX_SURFACES]; 2381 struct dc_plane_info plane_infos[MAX_SURFACES]; 2382 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 2383 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 2384 struct dc_stream_update stream_update; 2385 } * bundle; 2386 int k, m; 2387 2388 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 2389 2390 if (!bundle) { 2391 dm_error("Failed to allocate update bundle\n"); 2392 goto cleanup; 2393 } 2394 2395 for (k = 0; k < dc_state->stream_count; k++) { 2396 bundle->stream_update.stream = dc_state->streams[k]; 2397 2398 for (m = 0; m < dc_state->stream_status->plane_count; m++) { 2399 bundle->surface_updates[m].surface = 2400 dc_state->stream_status->plane_states[m]; 2401 bundle->surface_updates[m].surface->force_full_update = 2402 true; 2403 } 2404 dc_commit_updates_for_stream( 2405 dm->dc, bundle->surface_updates, 2406 dc_state->stream_status->plane_count, 2407 dc_state->streams[k], &bundle->stream_update, dc_state); 2408 } 2409 2410 cleanup: 2411 kfree(bundle); 2412 2413 return; 2414 } 2415 2416 static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state) 2417 { 2418 struct dc_stream_state *stream_state; 2419 struct amdgpu_dm_connector *aconnector = link->priv; 2420 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); 2421 struct 
dc_stream_update stream_update; 2422 bool dpms_off = true; 2423 2424 memset(&stream_update, 0, sizeof(stream_update)); 2425 stream_update.dpms_off = &dpms_off; 2426 2427 mutex_lock(&adev->dm.dc_lock); 2428 stream_state = dc_stream_find_from_link(link); 2429 2430 if (stream_state == NULL) { 2431 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n"); 2432 mutex_unlock(&adev->dm.dc_lock); 2433 return; 2434 } 2435 2436 stream_update.stream = stream_state; 2437 acrtc_state->force_dpms_off = true; 2438 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, 2439 stream_state, &stream_update, 2440 stream_state->ctx->dc->current_state); 2441 mutex_unlock(&adev->dm.dc_lock); 2442 } 2443 2444 static int dm_resume(void *handle) 2445 { 2446 struct amdgpu_device *adev = handle; 2447 struct drm_device *ddev = adev_to_drm(adev); 2448 struct amdgpu_display_manager *dm = &adev->dm; 2449 struct amdgpu_dm_connector *aconnector; 2450 struct drm_connector *connector; 2451 struct drm_connector_list_iter iter; 2452 struct drm_crtc *crtc; 2453 struct drm_crtc_state *new_crtc_state; 2454 struct dm_crtc_state *dm_new_crtc_state; 2455 struct drm_plane *plane; 2456 struct drm_plane_state *new_plane_state; 2457 struct dm_plane_state *dm_new_plane_state; 2458 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 2459 enum dc_connection_type new_connection_type = dc_connection_none; 2460 struct dc_state *dc_state; 2461 int i, r, j; 2462 2463 if (amdgpu_in_reset(adev)) { 2464 dc_state = dm->cached_dc_state; 2465 2466 r = dm_dmub_hw_init(adev); 2467 if (r) 2468 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2469 2470 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2471 dc_resume(dm->dc); 2472 2473 amdgpu_dm_irq_resume_early(adev); 2474 2475 for (i = 0; i < dc_state->stream_count; i++) { 2476 dc_state->streams[i]->mode_changed = true; 2477 for (j = 0; j < dc_state->stream_status->plane_count; j++) { 2478 dc_state->stream_status->plane_states[j]->update_flags.raw 2479 = 0xffffffff; 2480 } 2481 } 2482 #if defined(CONFIG_DRM_AMD_DC_DCN) 2483 /* 2484 * Resource allocation happens for link encoders for newer ASIC in 2485 * dc_validate_global_state, so we need to revalidate it. 2486 * 2487 * This shouldn't fail (it passed once before), so warn if it does. 2488 */ 2489 WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK); 2490 #endif 2491 2492 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 2493 2494 dm_gpureset_commit_state(dm->cached_dc_state, dm); 2495 2496 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2497 2498 dc_release_state(dm->cached_dc_state); 2499 dm->cached_dc_state = NULL; 2500 2501 amdgpu_dm_irq_resume_late(adev); 2502 2503 mutex_unlock(&dm->dc_lock); 2504 2505 return 0; 2506 } 2507 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 2508 dc_release_state(dm_state->context); 2509 dm_state->context = dc_create_state(dm->dc); 2510 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2511 dc_resource_state_construct(dm->dc, dm_state->context); 2512 2513 /* Before powering on DC we need to re-initialize DMUB. 
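 * DMUB hardware state is not expected to survive suspend, so the call below
 * reloads the firmware regions and restarts the controller before DC itself
 * is powered back on and resumed.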
*/ 2514 r = dm_dmub_hw_init(adev); 2515 if (r) 2516 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 2517 2518 /* power on hardware */ 2519 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 2520 2521 /* program HPD filter */ 2522 dc_resume(dm->dc); 2523 2524 /* 2525 * early enable HPD Rx IRQ, should be done before set mode as short 2526 * pulse interrupts are used for MST 2527 */ 2528 amdgpu_dm_irq_resume_early(adev); 2529 2530 /* On resume we need to rewrite the MSTM control bits to enable MST*/ 2531 s3_handle_mst(ddev, false); 2532 2533 /* Do detection*/ 2534 drm_connector_list_iter_begin(ddev, &iter); 2535 drm_for_each_connector_iter(connector, &iter) { 2536 aconnector = to_amdgpu_dm_connector(connector); 2537 2538 /* 2539 * this is the case when traversing through already created 2540 * MST connectors, should be skipped 2541 */ 2542 if (aconnector->mst_port) 2543 continue; 2544 2545 mutex_lock(&aconnector->hpd_lock); 2546 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 2547 DRM_ERROR("KMS: Failed to detect connector\n"); 2548 2549 if (aconnector->base.force && new_connection_type == dc_connection_none) 2550 emulated_link_detect(aconnector->dc_link); 2551 else 2552 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 2553 2554 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 2555 aconnector->fake_enable = false; 2556 2557 if (aconnector->dc_sink) 2558 dc_sink_release(aconnector->dc_sink); 2559 aconnector->dc_sink = NULL; 2560 amdgpu_dm_update_connector_after_detect(aconnector); 2561 mutex_unlock(&aconnector->hpd_lock); 2562 } 2563 drm_connector_list_iter_end(&iter); 2564 2565 /* Force mode set in atomic commit */ 2566 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) 2567 new_crtc_state->active_changed = true; 2568 2569 /* 2570 * atomic_check is expected to create the dc states. We need to release 2571 * them here, since they were duplicated as part of the suspend 2572 * procedure. 2573 */ 2574 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { 2575 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 2576 if (dm_new_crtc_state->stream) { 2577 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); 2578 dc_stream_release(dm_new_crtc_state->stream); 2579 dm_new_crtc_state->stream = NULL; 2580 } 2581 } 2582 2583 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { 2584 dm_new_plane_state = to_dm_plane_state(new_plane_state); 2585 if (dm_new_plane_state->dc_state) { 2586 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); 2587 dc_plane_state_release(dm_new_plane_state->dc_state); 2588 dm_new_plane_state->dc_state = NULL; 2589 } 2590 } 2591 2592 drm_atomic_helper_resume(ddev, dm->cached_state); 2593 2594 dm->cached_state = NULL; 2595 2596 amdgpu_dm_irq_resume_late(adev); 2597 2598 amdgpu_dm_smu_write_watermarks_table(adev); 2599 2600 return 0; 2601 } 2602 2603 /** 2604 * DOC: DM Lifecycle 2605 * 2606 * DM (and consequently DC) is registered in the amdgpu base driver as a IP 2607 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to 2608 * the base driver's device list to be initialized and torn down accordingly. 2609 * 2610 * The functions to do so are provided as hooks in &struct amd_ip_funcs. 
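 *
 * For reference, a minimal sketch of how the base driver picks this block up
 * (the real call site lives in the SoC setup code, not in this file):
 *
 *   if (amdgpu_device_has_dc_support(adev))
 *       amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * Once added, the hooks in &struct amd_ip_funcs below are invoked by the base
 * driver at the corresponding points of the device lifecycle.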
2611 */ 2612 2613 static const struct amd_ip_funcs amdgpu_dm_funcs = { 2614 .name = "dm", 2615 .early_init = dm_early_init, 2616 .late_init = dm_late_init, 2617 .sw_init = dm_sw_init, 2618 .sw_fini = dm_sw_fini, 2619 .early_fini = amdgpu_dm_early_fini, 2620 .hw_init = dm_hw_init, 2621 .hw_fini = dm_hw_fini, 2622 .suspend = dm_suspend, 2623 .resume = dm_resume, 2624 .is_idle = dm_is_idle, 2625 .wait_for_idle = dm_wait_for_idle, 2626 .check_soft_reset = dm_check_soft_reset, 2627 .soft_reset = dm_soft_reset, 2628 .set_clockgating_state = dm_set_clockgating_state, 2629 .set_powergating_state = dm_set_powergating_state, 2630 }; 2631 2632 const struct amdgpu_ip_block_version dm_ip_block = 2633 { 2634 .type = AMD_IP_BLOCK_TYPE_DCE, 2635 .major = 1, 2636 .minor = 0, 2637 .rev = 0, 2638 .funcs = &amdgpu_dm_funcs, 2639 }; 2640 2641 2642 /** 2643 * DOC: atomic 2644 * 2645 * *WIP* 2646 */ 2647 2648 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 2649 .fb_create = amdgpu_display_user_framebuffer_create, 2650 .get_format_info = amd_get_format_info, 2651 .output_poll_changed = drm_fb_helper_output_poll_changed, 2652 .atomic_check = amdgpu_dm_atomic_check, 2653 .atomic_commit = drm_atomic_helper_commit, 2654 }; 2655 2656 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { 2657 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail 2658 }; 2659 2660 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) 2661 { 2662 u32 max_cll, min_cll, max, min, q, r; 2663 struct amdgpu_dm_backlight_caps *caps; 2664 struct amdgpu_display_manager *dm; 2665 struct drm_connector *conn_base; 2666 struct amdgpu_device *adev; 2667 struct dc_link *link = NULL; 2668 static const u8 pre_computed_values[] = { 2669 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, 2670 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; 2671 int i; 2672 2673 if (!aconnector || !aconnector->dc_link) 2674 return; 2675 2676 link = aconnector->dc_link; 2677 if (link->connector_signal != SIGNAL_TYPE_EDP) 2678 return; 2679 2680 conn_base = &aconnector->base; 2681 adev = drm_to_adev(conn_base->dev); 2682 dm = &adev->dm; 2683 for (i = 0; i < dm->num_of_edps; i++) { 2684 if (link == dm->backlight_link[i]) 2685 break; 2686 } 2687 if (i >= dm->num_of_edps) 2688 return; 2689 caps = &dm->backlight_caps[i]; 2690 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; 2691 caps->aux_support = false; 2692 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; 2693 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; 2694 2695 if (caps->ext_caps->bits.oled == 1 /*|| 2696 caps->ext_caps->bits.sdr_aux_backlight_control == 1 || 2697 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) 2698 caps->aux_support = true; 2699 2700 if (amdgpu_backlight == 0) 2701 caps->aux_support = false; 2702 else if (amdgpu_backlight == 1) 2703 caps->aux_support = true; 2704 2705 /* From the specification (CTA-861-G), for calculating the maximum 2706 * luminance we need to use: 2707 * Luminance = 50*2**(CV/32) 2708 * Where CV is a one-byte value. 2709 * For calculating this expression we may need float point precision; 2710 * to avoid this complexity level, we take advantage that CV is divided 2711 * by a constant. From the Euclids division algorithm, we know that CV 2712 * can be written as: CV = 32*q + r. Next, we replace CV in the 2713 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just 2714 * need to pre-compute the value of r/32. 
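 * As a worked example (illustrative numbers only): max_cll = 70 gives
 * q = 2 and r = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
 * which matches 50*2**(70/32) ~= 228 nits.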
For pre-computing the values 2715 * We just used the following Ruby line: 2716 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round} 2717 * The results of the above expressions can be verified at 2718 * pre_computed_values. 2719 */ 2720 q = max_cll >> 5; 2721 r = max_cll % 32; 2722 max = (1 << q) * pre_computed_values[r]; 2723 2724 // min luminance: maxLum * (CV/255)^2 / 100 2725 q = DIV_ROUND_CLOSEST(min_cll, 255); 2726 min = max * DIV_ROUND_CLOSEST((q * q), 100); 2727 2728 caps->aux_max_input_signal = max; 2729 caps->aux_min_input_signal = min; 2730 } 2731 2732 void amdgpu_dm_update_connector_after_detect( 2733 struct amdgpu_dm_connector *aconnector) 2734 { 2735 struct drm_connector *connector = &aconnector->base; 2736 struct drm_device *dev = connector->dev; 2737 struct dc_sink *sink; 2738 2739 /* MST handled by drm_mst framework */ 2740 if (aconnector->mst_mgr.mst_state == true) 2741 return; 2742 2743 sink = aconnector->dc_link->local_sink; 2744 if (sink) 2745 dc_sink_retain(sink); 2746 2747 /* 2748 * Edid mgmt connector gets first update only in mode_valid hook and then 2749 * the connector sink is set to either fake or physical sink depends on link status. 2750 * Skip if already done during boot. 2751 */ 2752 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED 2753 && aconnector->dc_em_sink) { 2754 2755 /* 2756 * For S3 resume with headless use eml_sink to fake stream 2757 * because on resume connector->sink is set to NULL 2758 */ 2759 mutex_lock(&dev->mode_config.mutex); 2760 2761 if (sink) { 2762 if (aconnector->dc_sink) { 2763 amdgpu_dm_update_freesync_caps(connector, NULL); 2764 /* 2765 * retain and release below are used to 2766 * bump up refcount for sink because the link doesn't point 2767 * to it anymore after disconnect, so on next crtc to connector 2768 * reshuffle by UMD we will get into unwanted dc_sink release 2769 */ 2770 dc_sink_release(aconnector->dc_sink); 2771 } 2772 aconnector->dc_sink = sink; 2773 dc_sink_retain(aconnector->dc_sink); 2774 amdgpu_dm_update_freesync_caps(connector, 2775 aconnector->edid); 2776 } else { 2777 amdgpu_dm_update_freesync_caps(connector, NULL); 2778 if (!aconnector->dc_sink) { 2779 aconnector->dc_sink = aconnector->dc_em_sink; 2780 dc_sink_retain(aconnector->dc_sink); 2781 } 2782 } 2783 2784 mutex_unlock(&dev->mode_config.mutex); 2785 2786 if (sink) 2787 dc_sink_release(sink); 2788 return; 2789 } 2790 2791 /* 2792 * TODO: temporary guard to look for proper fix 2793 * if this sink is MST sink, we should not do anything 2794 */ 2795 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 2796 dc_sink_release(sink); 2797 return; 2798 } 2799 2800 if (aconnector->dc_sink == sink) { 2801 /* 2802 * We got a DP short pulse (Link Loss, DP CTS, etc...). 2803 * Do nothing!! 2804 */ 2805 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 2806 aconnector->connector_id); 2807 if (sink) 2808 dc_sink_release(sink); 2809 return; 2810 } 2811 2812 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", 2813 aconnector->connector_id, aconnector->dc_sink, sink); 2814 2815 mutex_lock(&dev->mode_config.mutex); 2816 2817 /* 2818 * 1. Update status of the drm connector 2819 * 2. Send an event and let userspace tell us what to do 2820 */ 2821 if (sink) { 2822 /* 2823 * TODO: check if we still need the S3 mode update workaround. 2824 * If yes, put it here. 
2825 */ 2826 if (aconnector->dc_sink) { 2827 amdgpu_dm_update_freesync_caps(connector, NULL); 2828 dc_sink_release(aconnector->dc_sink); 2829 } 2830 2831 aconnector->dc_sink = sink; 2832 dc_sink_retain(aconnector->dc_sink); 2833 if (sink->dc_edid.length == 0) { 2834 aconnector->edid = NULL; 2835 if (aconnector->dc_link->aux_mode) { 2836 drm_dp_cec_unset_edid( 2837 &aconnector->dm_dp_aux.aux); 2838 } 2839 } else { 2840 aconnector->edid = 2841 (struct edid *)sink->dc_edid.raw_edid; 2842 2843 drm_connector_update_edid_property(connector, 2844 aconnector->edid); 2845 if (aconnector->dc_link->aux_mode) 2846 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, 2847 aconnector->edid); 2848 } 2849 2850 amdgpu_dm_update_freesync_caps(connector, aconnector->edid); 2851 update_connector_ext_caps(aconnector); 2852 } else { 2853 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 2854 amdgpu_dm_update_freesync_caps(connector, NULL); 2855 drm_connector_update_edid_property(connector, NULL); 2856 aconnector->num_modes = 0; 2857 dc_sink_release(aconnector->dc_sink); 2858 aconnector->dc_sink = NULL; 2859 aconnector->edid = NULL; 2860 #ifdef CONFIG_DRM_AMD_DC_HDCP 2861 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ 2862 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2863 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2864 #endif 2865 } 2866 2867 mutex_unlock(&dev->mode_config.mutex); 2868 2869 update_subconnector_property(aconnector); 2870 2871 if (sink) 2872 dc_sink_release(sink); 2873 } 2874 2875 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 2876 { 2877 struct drm_connector *connector = &aconnector->base; 2878 struct drm_device *dev = connector->dev; 2879 enum dc_connection_type new_connection_type = dc_connection_none; 2880 struct amdgpu_device *adev = drm_to_adev(dev); 2881 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 2882 struct dm_crtc_state *dm_crtc_state = NULL; 2883 2884 if (adev->dm.disable_hpd_irq) 2885 return; 2886 2887 if (dm_con_state->base.state && dm_con_state->base.crtc) 2888 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state( 2889 dm_con_state->base.state, 2890 dm_con_state->base.crtc)); 2891 /* 2892 * In case of failure or MST no need to update connector status or notify the OS 2893 * since (for MST case) MST does this in its own context. 
2894 */ 2895 mutex_lock(&aconnector->hpd_lock); 2896 2897 #ifdef CONFIG_DRM_AMD_DC_HDCP 2898 if (adev->dm.hdcp_workqueue) { 2899 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 2900 dm_con_state->update_hdcp = true; 2901 } 2902 #endif 2903 if (aconnector->fake_enable) 2904 aconnector->fake_enable = false; 2905 2906 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) 2907 DRM_ERROR("KMS: Failed to detect connector\n"); 2908 2909 if (aconnector->base.force && new_connection_type == dc_connection_none) { 2910 emulated_link_detect(aconnector->dc_link); 2911 2912 2913 drm_modeset_lock_all(dev); 2914 dm_restore_drm_connector_state(dev, connector); 2915 drm_modeset_unlock_all(dev); 2916 2917 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 2918 drm_kms_helper_hotplug_event(dev); 2919 2920 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { 2921 if (new_connection_type == dc_connection_none && 2922 aconnector->dc_link->type == dc_connection_none && 2923 dm_crtc_state) 2924 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state); 2925 2926 amdgpu_dm_update_connector_after_detect(aconnector); 2927 2928 drm_modeset_lock_all(dev); 2929 dm_restore_drm_connector_state(dev, connector); 2930 drm_modeset_unlock_all(dev); 2931 2932 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) 2933 drm_kms_helper_hotplug_event(dev); 2934 } 2935 mutex_unlock(&aconnector->hpd_lock); 2936 2937 } 2938 2939 static void handle_hpd_irq(void *param) 2940 { 2941 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 2942 2943 handle_hpd_irq_helper(aconnector); 2944 2945 } 2946 2947 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector) 2948 { 2949 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; 2950 uint8_t dret; 2951 bool new_irq_handled = false; 2952 int dpcd_addr; 2953 int dpcd_bytes_to_read; 2954 2955 const int max_process_count = 30; 2956 int process_count = 0; 2957 2958 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); 2959 2960 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { 2961 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; 2962 /* DPCD 0x200 - 0x201 for downstream IRQ */ 2963 dpcd_addr = DP_SINK_COUNT; 2964 } else { 2965 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; 2966 /* DPCD 0x2002 - 0x2005 for downstream IRQ */ 2967 dpcd_addr = DP_SINK_COUNT_ESI; 2968 } 2969 2970 dret = drm_dp_dpcd_read( 2971 &aconnector->dm_dp_aux.aux, 2972 dpcd_addr, 2973 esi, 2974 dpcd_bytes_to_read); 2975 2976 while (dret == dpcd_bytes_to_read && 2977 process_count < max_process_count) { 2978 uint8_t retry; 2979 dret = 0; 2980 2981 process_count++; 2982 2983 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); 2984 /* handle HPD short pulse irq */ 2985 if (aconnector->mst_mgr.mst_state) 2986 drm_dp_mst_hpd_irq( 2987 &aconnector->mst_mgr, 2988 esi, 2989 &new_irq_handled); 2990 2991 if (new_irq_handled) { 2992 /* ACK at DPCD to notify down stream */ 2993 const int ack_dpcd_bytes_to_write = 2994 dpcd_bytes_to_read - 1; 2995 2996 for (retry = 0; retry < 3; retry++) { 2997 uint8_t wret; 2998 2999 wret = drm_dp_dpcd_write( 3000 &aconnector->dm_dp_aux.aux, 3001 dpcd_addr + 1, 3002 &esi[1], 3003 ack_dpcd_bytes_to_write); 3004 if (wret == ack_dpcd_bytes_to_write) 3005 break; 3006 } 3007 3008 /* check if there is new irq to be handled */ 3009 dret = drm_dp_dpcd_read( 3010 &aconnector->dm_dp_aux.aux, 3011 dpcd_addr, 3012 esi, 3013 dpcd_bytes_to_read); 
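/* Clear the flag for the next pass; if drm_dp_mst_hpd_irq() finds nothing new in the re-read ESI bytes, the loop breaks below. */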
3014 3015 new_irq_handled = false; 3016 } else { 3017 break; 3018 } 3019 } 3020 3021 if (process_count == max_process_count) 3022 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); 3023 } 3024 3025 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq, 3026 union hpd_irq_data hpd_irq_data) 3027 { 3028 struct hpd_rx_irq_offload_work *offload_work = 3029 kzalloc(sizeof(*offload_work), GFP_KERNEL); 3030 3031 if (!offload_work) { 3032 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n"); 3033 return; 3034 } 3035 3036 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work); 3037 offload_work->data = hpd_irq_data; 3038 offload_work->offload_wq = offload_wq; 3039 3040 queue_work(offload_wq->wq, &offload_work->work); 3041 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work"); 3042 } 3043 3044 static void handle_hpd_rx_irq(void *param) 3045 { 3046 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 3047 struct drm_connector *connector = &aconnector->base; 3048 struct drm_device *dev = connector->dev; 3049 struct dc_link *dc_link = aconnector->dc_link; 3050 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 3051 bool result = false; 3052 enum dc_connection_type new_connection_type = dc_connection_none; 3053 struct amdgpu_device *adev = drm_to_adev(dev); 3054 union hpd_irq_data hpd_irq_data; 3055 bool link_loss = false; 3056 bool has_left_work = false; 3057 int idx = aconnector->base.index; 3058 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx]; 3059 3060 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 3061 3062 if (adev->dm.disable_hpd_irq) 3063 return; 3064 3065 /* 3066 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio 3067 * conflict, after implement i2c helper, this mutex should be 3068 * retired. 3069 */ 3070 mutex_lock(&aconnector->hpd_lock); 3071 3072 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, 3073 &link_loss, true, &has_left_work); 3074 3075 if (!has_left_work) 3076 goto out; 3077 3078 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { 3079 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3080 goto out; 3081 } 3082 3083 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { 3084 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || 3085 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { 3086 dm_handle_mst_sideband_msg(aconnector); 3087 goto out; 3088 } 3089 3090 if (link_loss) { 3091 bool skip = false; 3092 3093 spin_lock(&offload_wq->offload_lock); 3094 skip = offload_wq->is_handling_link_loss; 3095 3096 if (!skip) 3097 offload_wq->is_handling_link_loss = true; 3098 3099 spin_unlock(&offload_wq->offload_lock); 3100 3101 if (!skip) 3102 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); 3103 3104 goto out; 3105 } 3106 } 3107 3108 out: 3109 if (result && !is_mst_root_connector) { 3110 /* Downstream Port status changed. 
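 * e.g. a sink was plugged into or removed from a dongle or SST branch device,
 * so re-run sink detection below.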
*/ 3111 if (!dc_link_detect_sink(dc_link, &new_connection_type)) 3112 DRM_ERROR("KMS: Failed to detect connector\n"); 3113 3114 if (aconnector->base.force && new_connection_type == dc_connection_none) { 3115 emulated_link_detect(dc_link); 3116 3117 if (aconnector->fake_enable) 3118 aconnector->fake_enable = false; 3119 3120 amdgpu_dm_update_connector_after_detect(aconnector); 3121 3122 3123 drm_modeset_lock_all(dev); 3124 dm_restore_drm_connector_state(dev, connector); 3125 drm_modeset_unlock_all(dev); 3126 3127 drm_kms_helper_hotplug_event(dev); 3128 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 3129 3130 if (aconnector->fake_enable) 3131 aconnector->fake_enable = false; 3132 3133 amdgpu_dm_update_connector_after_detect(aconnector); 3134 3135 3136 drm_modeset_lock_all(dev); 3137 dm_restore_drm_connector_state(dev, connector); 3138 drm_modeset_unlock_all(dev); 3139 3140 drm_kms_helper_hotplug_event(dev); 3141 } 3142 } 3143 #ifdef CONFIG_DRM_AMD_DC_HDCP 3144 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { 3145 if (adev->dm.hdcp_workqueue) 3146 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); 3147 } 3148 #endif 3149 3150 if (dc_link->type != dc_connection_mst_branch) 3151 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); 3152 3153 mutex_unlock(&aconnector->hpd_lock); 3154 } 3155 3156 static void register_hpd_handlers(struct amdgpu_device *adev) 3157 { 3158 struct drm_device *dev = adev_to_drm(adev); 3159 struct drm_connector *connector; 3160 struct amdgpu_dm_connector *aconnector; 3161 const struct dc_link *dc_link; 3162 struct dc_interrupt_params int_params = {0}; 3163 3164 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3165 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3166 3167 list_for_each_entry(connector, 3168 &dev->mode_config.connector_list, head) { 3169 3170 aconnector = to_amdgpu_dm_connector(connector); 3171 dc_link = aconnector->dc_link; 3172 3173 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { 3174 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3175 int_params.irq_source = dc_link->irq_source_hpd; 3176 3177 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3178 handle_hpd_irq, 3179 (void *) aconnector); 3180 } 3181 3182 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { 3183 3184 /* Also register for DP short pulse (hpd_rx). */ 3185 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3186 int_params.irq_source = dc_link->irq_source_hpd_rx; 3187 3188 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3189 handle_hpd_rx_irq, 3190 (void *) aconnector); 3191 3192 if (adev->dm.hpd_rx_offload_wq) 3193 adev->dm.hpd_rx_offload_wq[connector->index].aconnector = 3194 aconnector; 3195 } 3196 } 3197 } 3198 3199 #if defined(CONFIG_DRM_AMD_DC_SI) 3200 /* Register IRQ sources and initialize IRQ callbacks */ 3201 static int dce60_register_irq_handlers(struct amdgpu_device *adev) 3202 { 3203 struct dc *dc = adev->dm.dc; 3204 struct common_irq_params *c_irq_params; 3205 struct dc_interrupt_params int_params = {0}; 3206 int r; 3207 int i; 3208 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3209 3210 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3211 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3212 3213 /* 3214 * Actions of amdgpu_irq_add_id(): 3215 * 1. Register a set() function with base driver. 3216 * Base driver will call set() function to enable/disable an 3217 * interrupt in DC hardware. 3218 * 2. Register amdgpu_dm_irq_handler(). 
3219 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3220 * coming from DC hardware. 3221 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3222 * for acknowledging and handling. */ 3223 3224 /* Use VBLANK interrupt */ 3225 for (i = 0; i < adev->mode_info.num_crtc; i++) { 3226 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq); 3227 if (r) { 3228 DRM_ERROR("Failed to add crtc irq id!\n"); 3229 return r; 3230 } 3231 3232 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3233 int_params.irq_source = 3234 dc_interrupt_to_irq_source(dc, i+1 , 0); 3235 3236 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3237 3238 c_irq_params->adev = adev; 3239 c_irq_params->irq_src = int_params.irq_source; 3240 3241 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3242 dm_crtc_high_irq, c_irq_params); 3243 } 3244 3245 /* Use GRPH_PFLIP interrupt */ 3246 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3247 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3248 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3249 if (r) { 3250 DRM_ERROR("Failed to add page flip irq id!\n"); 3251 return r; 3252 } 3253 3254 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3255 int_params.irq_source = 3256 dc_interrupt_to_irq_source(dc, i, 0); 3257 3258 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3259 3260 c_irq_params->adev = adev; 3261 c_irq_params->irq_src = int_params.irq_source; 3262 3263 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3264 dm_pflip_high_irq, c_irq_params); 3265 3266 } 3267 3268 /* HPD */ 3269 r = amdgpu_irq_add_id(adev, client_id, 3270 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3271 if (r) { 3272 DRM_ERROR("Failed to add hpd irq id!\n"); 3273 return r; 3274 } 3275 3276 register_hpd_handlers(adev); 3277 3278 return 0; 3279 } 3280 #endif 3281 3282 /* Register IRQ sources and initialize IRQ callbacks */ 3283 static int dce110_register_irq_handlers(struct amdgpu_device *adev) 3284 { 3285 struct dc *dc = adev->dm.dc; 3286 struct common_irq_params *c_irq_params; 3287 struct dc_interrupt_params int_params = {0}; 3288 int r; 3289 int i; 3290 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; 3291 3292 if (adev->asic_type >= CHIP_VEGA10) 3293 client_id = SOC15_IH_CLIENTID_DCE; 3294 3295 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3296 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3297 3298 /* 3299 * Actions of amdgpu_irq_add_id(): 3300 * 1. Register a set() function with base driver. 3301 * Base driver will call set() function to enable/disable an 3302 * interrupt in DC hardware. 3303 * 2. Register amdgpu_dm_irq_handler(). 3304 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3305 * coming from DC hardware. 3306 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3307 * for acknowledging and handling. 
*/ 3308 3309 /* Use VBLANK interrupt */ 3310 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { 3311 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); 3312 if (r) { 3313 DRM_ERROR("Failed to add crtc irq id!\n"); 3314 return r; 3315 } 3316 3317 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3318 int_params.irq_source = 3319 dc_interrupt_to_irq_source(dc, i, 0); 3320 3321 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3322 3323 c_irq_params->adev = adev; 3324 c_irq_params->irq_src = int_params.irq_source; 3325 3326 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3327 dm_crtc_high_irq, c_irq_params); 3328 } 3329 3330 /* Use VUPDATE interrupt */ 3331 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { 3332 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); 3333 if (r) { 3334 DRM_ERROR("Failed to add vupdate irq id!\n"); 3335 return r; 3336 } 3337 3338 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3339 int_params.irq_source = 3340 dc_interrupt_to_irq_source(dc, i, 0); 3341 3342 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3343 3344 c_irq_params->adev = adev; 3345 c_irq_params->irq_src = int_params.irq_source; 3346 3347 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3348 dm_vupdate_high_irq, c_irq_params); 3349 } 3350 3351 /* Use GRPH_PFLIP interrupt */ 3352 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; 3353 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { 3354 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); 3355 if (r) { 3356 DRM_ERROR("Failed to add page flip irq id!\n"); 3357 return r; 3358 } 3359 3360 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3361 int_params.irq_source = 3362 dc_interrupt_to_irq_source(dc, i, 0); 3363 3364 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3365 3366 c_irq_params->adev = adev; 3367 c_irq_params->irq_src = int_params.irq_source; 3368 3369 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3370 dm_pflip_high_irq, c_irq_params); 3371 3372 } 3373 3374 /* HPD */ 3375 r = amdgpu_irq_add_id(adev, client_id, 3376 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 3377 if (r) { 3378 DRM_ERROR("Failed to add hpd irq id!\n"); 3379 return r; 3380 } 3381 3382 register_hpd_handlers(adev); 3383 3384 return 0; 3385 } 3386 3387 #if defined(CONFIG_DRM_AMD_DC_DCN) 3388 /* Register IRQ sources and initialize IRQ callbacks */ 3389 static int dcn10_register_irq_handlers(struct amdgpu_device *adev) 3390 { 3391 struct dc *dc = adev->dm.dc; 3392 struct common_irq_params *c_irq_params; 3393 struct dc_interrupt_params int_params = {0}; 3394 int r; 3395 int i; 3396 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3397 static const unsigned int vrtl_int_srcid[] = { 3398 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, 3399 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, 3400 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, 3401 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, 3402 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, 3403 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL 3404 }; 3405 #endif 3406 3407 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3408 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3409 3410 /* 3411 * Actions of amdgpu_irq_add_id(): 3412 * 1. Register a set() function with base driver. 
3413 * Base driver will call set() function to enable/disable an 3414 * interrupt in DC hardware. 3415 * 2. Register amdgpu_dm_irq_handler(). 3416 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts 3417 * coming from DC hardware. 3418 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC 3419 * for acknowledging and handling. 3420 */ 3421 3422 /* Use VSTARTUP interrupt */ 3423 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; 3424 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; 3425 i++) { 3426 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); 3427 3428 if (r) { 3429 DRM_ERROR("Failed to add crtc irq id!\n"); 3430 return r; 3431 } 3432 3433 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3434 int_params.irq_source = 3435 dc_interrupt_to_irq_source(dc, i, 0); 3436 3437 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; 3438 3439 c_irq_params->adev = adev; 3440 c_irq_params->irq_src = int_params.irq_source; 3441 3442 amdgpu_dm_irq_register_interrupt( 3443 adev, &int_params, dm_crtc_high_irq, c_irq_params); 3444 } 3445 3446 /* Use otg vertical line interrupt */ 3447 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 3448 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { 3449 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, 3450 vrtl_int_srcid[i], &adev->vline0_irq); 3451 3452 if (r) { 3453 DRM_ERROR("Failed to add vline0 irq id!\n"); 3454 return r; 3455 } 3456 3457 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3458 int_params.irq_source = 3459 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); 3460 3461 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { 3462 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); 3463 break; 3464 } 3465 3466 c_irq_params = &adev->dm.vline0_params[int_params.irq_source 3467 - DC_IRQ_SOURCE_DC1_VLINE0]; 3468 3469 c_irq_params->adev = adev; 3470 c_irq_params->irq_src = int_params.irq_source; 3471 3472 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3473 dm_dcn_vertical_interrupt0_high_irq, c_irq_params); 3474 } 3475 #endif 3476 3477 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to 3478 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx 3479 * to trigger at end of each vblank, regardless of state of the lock, 3480 * matching DCE behaviour. 
3481 */ 3482 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; 3483 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; 3484 i++) { 3485 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); 3486 3487 if (r) { 3488 DRM_ERROR("Failed to add vupdate irq id!\n"); 3489 return r; 3490 } 3491 3492 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3493 int_params.irq_source = 3494 dc_interrupt_to_irq_source(dc, i, 0); 3495 3496 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; 3497 3498 c_irq_params->adev = adev; 3499 c_irq_params->irq_src = int_params.irq_source; 3500 3501 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3502 dm_vupdate_high_irq, c_irq_params); 3503 } 3504 3505 /* Use GRPH_PFLIP interrupt */ 3506 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; 3507 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; 3508 i++) { 3509 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); 3510 if (r) { 3511 DRM_ERROR("Failed to add page flip irq id!\n"); 3512 return r; 3513 } 3514 3515 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3516 int_params.irq_source = 3517 dc_interrupt_to_irq_source(dc, i, 0); 3518 3519 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; 3520 3521 c_irq_params->adev = adev; 3522 c_irq_params->irq_src = int_params.irq_source; 3523 3524 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3525 dm_pflip_high_irq, c_irq_params); 3526 3527 } 3528 3529 /* HPD */ 3530 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3531 &adev->hpd_irq); 3532 if (r) { 3533 DRM_ERROR("Failed to add hpd irq id!\n"); 3534 return r; 3535 } 3536 3537 register_hpd_handlers(adev); 3538 3539 return 0; 3540 } 3541 /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3542 static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3543 { 3544 struct dc *dc = adev->dm.dc; 3545 struct common_irq_params *c_irq_params; 3546 struct dc_interrupt_params int_params = {0}; 3547 int r, i; 3548 3549 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3550 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3551 3552 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3553 &adev->dmub_outbox_irq); 3554 if (r) { 3555 DRM_ERROR("Failed to add outbox irq id!\n"); 3556 return r; 3557 } 3558 3559 if (dc->ctx->dmub_srv) { 3560 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3561 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3562 int_params.irq_source = 3563 dc_interrupt_to_irq_source(dc, i, 0); 3564 3565 c_irq_params = &adev->dm.dmub_outbox_params[0]; 3566 3567 c_irq_params->adev = adev; 3568 c_irq_params->irq_src = int_params.irq_source; 3569 3570 amdgpu_dm_irq_register_interrupt(adev, &int_params, 3571 dm_dmub_outbox1_low_irq, c_irq_params); 3572 } 3573 3574 return 0; 3575 } 3576 #endif 3577 3578 /* 3579 * Acquires the lock for the atomic state object and returns 3580 * the new atomic state. 3581 * 3582 * This should only be called during atomic check. 
3583 */ 3584 static int dm_atomic_get_state(struct drm_atomic_state *state, 3585 struct dm_atomic_state **dm_state) 3586 { 3587 struct drm_device *dev = state->dev; 3588 struct amdgpu_device *adev = drm_to_adev(dev); 3589 struct amdgpu_display_manager *dm = &adev->dm; 3590 struct drm_private_state *priv_state; 3591 3592 if (*dm_state) 3593 return 0; 3594 3595 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); 3596 if (IS_ERR(priv_state)) 3597 return PTR_ERR(priv_state); 3598 3599 *dm_state = to_dm_atomic_state(priv_state); 3600 3601 return 0; 3602 } 3603 3604 static struct dm_atomic_state * 3605 dm_atomic_get_new_state(struct drm_atomic_state *state) 3606 { 3607 struct drm_device *dev = state->dev; 3608 struct amdgpu_device *adev = drm_to_adev(dev); 3609 struct amdgpu_display_manager *dm = &adev->dm; 3610 struct drm_private_obj *obj; 3611 struct drm_private_state *new_obj_state; 3612 int i; 3613 3614 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { 3615 if (obj->funcs == dm->atomic_obj.funcs) 3616 return to_dm_atomic_state(new_obj_state); 3617 } 3618 3619 return NULL; 3620 } 3621 3622 static struct drm_private_state * 3623 dm_atomic_duplicate_state(struct drm_private_obj *obj) 3624 { 3625 struct dm_atomic_state *old_state, *new_state; 3626 3627 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); 3628 if (!new_state) 3629 return NULL; 3630 3631 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); 3632 3633 old_state = to_dm_atomic_state(obj->state); 3634 3635 if (old_state && old_state->context) 3636 new_state->context = dc_copy_state(old_state->context); 3637 3638 if (!new_state->context) { 3639 kfree(new_state); 3640 return NULL; 3641 } 3642 3643 return &new_state->base; 3644 } 3645 3646 static void dm_atomic_destroy_state(struct drm_private_obj *obj, 3647 struct drm_private_state *state) 3648 { 3649 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 3650 3651 if (dm_state && dm_state->context) 3652 dc_release_state(dm_state->context); 3653 3654 kfree(dm_state); 3655 } 3656 3657 static struct drm_private_state_funcs dm_atomic_state_funcs = { 3658 .atomic_duplicate_state = dm_atomic_duplicate_state, 3659 .atomic_destroy_state = dm_atomic_destroy_state, 3660 }; 3661 3662 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) 3663 { 3664 struct dm_atomic_state *state; 3665 int r; 3666 3667 adev->mode_info.mode_config_initialized = true; 3668 3669 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; 3670 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; 3671 3672 adev_to_drm(adev)->mode_config.max_width = 16384; 3673 adev_to_drm(adev)->mode_config.max_height = 16384; 3674 3675 adev_to_drm(adev)->mode_config.preferred_depth = 24; 3676 adev_to_drm(adev)->mode_config.prefer_shadow = 1; 3677 /* indicates support for immediate flip */ 3678 adev_to_drm(adev)->mode_config.async_page_flip = true; 3679 3680 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; 3681 3682 state = kzalloc(sizeof(*state), GFP_KERNEL); 3683 if (!state) 3684 return -ENOMEM; 3685 3686 state->context = dc_create_state(adev->dm.dc); 3687 if (!state->context) { 3688 kfree(state); 3689 return -ENOMEM; 3690 } 3691 3692 dc_resource_state_copy_construct_current(adev->dm.dc, state->context); 3693 3694 drm_atomic_private_obj_init(adev_to_drm(adev), 3695 &adev->dm.atomic_obj, 3696 &state->base, 3697 &dm_atomic_state_funcs); 3698 3699 r = amdgpu_display_modeset_create_props(adev); 3700 if (r) { 3701 
dc_release_state(state->context); 3702 kfree(state); 3703 return r; 3704 } 3705 3706 r = amdgpu_dm_audio_init(adev); 3707 if (r) { 3708 dc_release_state(state->context); 3709 kfree(state); 3710 return r; 3711 } 3712 3713 return 0; 3714 } 3715 3716 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 3717 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 3718 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 3719 3720 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 3721 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 3722 3723 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, 3724 int bl_idx) 3725 { 3726 #if defined(CONFIG_ACPI) 3727 struct amdgpu_dm_backlight_caps caps; 3728 3729 memset(&caps, 0, sizeof(caps)); 3730 3731 if (dm->backlight_caps[bl_idx].caps_valid) 3732 return; 3733 3734 amdgpu_acpi_get_backlight_caps(&caps); 3735 if (caps.caps_valid) { 3736 dm->backlight_caps[bl_idx].caps_valid = true; 3737 if (caps.aux_support) 3738 return; 3739 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; 3740 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; 3741 } else { 3742 dm->backlight_caps[bl_idx].min_input_signal = 3743 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 3744 dm->backlight_caps[bl_idx].max_input_signal = 3745 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 3746 } 3747 #else 3748 if (dm->backlight_caps[bl_idx].aux_support) 3749 return; 3750 3751 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; 3752 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; 3753 #endif 3754 } 3755 3756 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, 3757 unsigned *min, unsigned *max) 3758 { 3759 if (!caps) 3760 return 0; 3761 3762 if (caps->aux_support) { 3763 // Firmware limits are in nits, DC API wants millinits. 3764 *max = 1000 * caps->aux_max_input_signal; 3765 *min = 1000 * caps->aux_min_input_signal; 3766 } else { 3767 // Firmware limits are 8-bit, PWM control is 16-bit. 
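		// Multiplying by 0x101 replicates the 8-bit firmware value into
		// both bytes of the 16-bit PWM range, so the endpoints line up
		// exactly. For example (using the defaults defined above): a
		// max_input_signal of 255 maps to 255 * 0x101 = 0xffff, and the
		// default min_input_signal of 12 maps to 12 * 0x101 = 0x0c0c.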
3768 *max = 0x101 * caps->max_input_signal; 3769 *min = 0x101 * caps->min_input_signal; 3770 } 3771 return 1; 3772 } 3773 3774 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, 3775 uint32_t brightness) 3776 { 3777 unsigned min, max; 3778 3779 if (!get_brightness_range(caps, &min, &max)) 3780 return brightness; 3781 3782 // Rescale 0..255 to min..max 3783 return min + DIV_ROUND_CLOSEST((max - min) * brightness, 3784 AMDGPU_MAX_BL_LEVEL); 3785 } 3786 3787 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, 3788 uint32_t brightness) 3789 { 3790 unsigned min, max; 3791 3792 if (!get_brightness_range(caps, &min, &max)) 3793 return brightness; 3794 3795 if (brightness < min) 3796 return 0; 3797 // Rescale min..max to 0..255 3798 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), 3799 max - min); 3800 } 3801 3802 static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, 3803 int bl_idx, 3804 u32 user_brightness) 3805 { 3806 struct amdgpu_dm_backlight_caps caps; 3807 struct dc_link *link; 3808 u32 brightness; 3809 bool rc; 3810 3811 amdgpu_dm_update_backlight_caps(dm, bl_idx); 3812 caps = dm->backlight_caps[bl_idx]; 3813 3814 dm->brightness[bl_idx] = user_brightness; 3815 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); 3816 link = (struct dc_link *)dm->backlight_link[bl_idx]; 3817 3818 /* Change brightness based on AUX property */ 3819 if (caps.aux_support) { 3820 rc = dc_link_set_backlight_level_nits(link, true, brightness, 3821 AUX_BL_DEFAULT_TRANSITION_TIME_MS); 3822 if (!rc) 3823 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); 3824 } else { 3825 rc = dc_link_set_backlight_level(link, brightness, 0); 3826 if (!rc) 3827 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); 3828 } 3829 3830 return rc ? 
0 : 1; 3831 } 3832 3833 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) 3834 { 3835 struct amdgpu_display_manager *dm = bl_get_data(bd); 3836 int i; 3837 3838 for (i = 0; i < dm->num_of_edps; i++) { 3839 if (bd == dm->backlight_dev[i]) 3840 break; 3841 } 3842 if (i >= AMDGPU_DM_MAX_NUM_EDP) 3843 i = 0; 3844 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); 3845 3846 return 0; 3847 } 3848 3849 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, 3850 int bl_idx) 3851 { 3852 struct amdgpu_dm_backlight_caps caps; 3853 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; 3854 3855 amdgpu_dm_update_backlight_caps(dm, bl_idx); 3856 caps = dm->backlight_caps[bl_idx]; 3857 3858 if (caps.aux_support) { 3859 u32 avg, peak; 3860 bool rc; 3861 3862 rc = dc_link_get_backlight_level_nits(link, &avg, &peak); 3863 if (!rc) 3864 return dm->brightness[bl_idx]; 3865 return convert_brightness_to_user(&caps, avg); 3866 } else { 3867 int ret = dc_link_get_backlight_level(link); 3868 3869 if (ret == DC_ERROR_UNEXPECTED) 3870 return dm->brightness[bl_idx]; 3871 return convert_brightness_to_user(&caps, ret); 3872 } 3873 } 3874 3875 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) 3876 { 3877 struct amdgpu_display_manager *dm = bl_get_data(bd); 3878 int i; 3879 3880 for (i = 0; i < dm->num_of_edps; i++) { 3881 if (bd == dm->backlight_dev[i]) 3882 break; 3883 } 3884 if (i >= AMDGPU_DM_MAX_NUM_EDP) 3885 i = 0; 3886 return amdgpu_dm_backlight_get_level(dm, i); 3887 } 3888 3889 static const struct backlight_ops amdgpu_dm_backlight_ops = { 3890 .options = BL_CORE_SUSPENDRESUME, 3891 .get_brightness = amdgpu_dm_backlight_get_brightness, 3892 .update_status = amdgpu_dm_backlight_update_status, 3893 }; 3894 3895 static void 3896 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) 3897 { 3898 char bl_name[16]; 3899 struct backlight_properties props = { 0 }; 3900 3901 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); 3902 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; 3903 3904 props.max_brightness = AMDGPU_MAX_BL_LEVEL; 3905 props.brightness = AMDGPU_MAX_BL_LEVEL; 3906 props.type = BACKLIGHT_RAW; 3907 3908 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", 3909 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps); 3910 3911 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name, 3912 adev_to_drm(dm->adev)->dev, 3913 dm, 3914 &amdgpu_dm_backlight_ops, 3915 &props); 3916 3917 if (IS_ERR(dm->backlight_dev[dm->num_of_edps])) 3918 DRM_ERROR("DM: Backlight registration failed!\n"); 3919 else 3920 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); 3921 } 3922 #endif 3923 3924 static int initialize_plane(struct amdgpu_display_manager *dm, 3925 struct amdgpu_mode_info *mode_info, int plane_id, 3926 enum drm_plane_type plane_type, 3927 const struct dc_plane_cap *plane_cap) 3928 { 3929 struct drm_plane *plane; 3930 unsigned long possible_crtcs; 3931 int ret = 0; 3932 3933 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); 3934 if (!plane) { 3935 DRM_ERROR("KMS: Failed to allocate plane\n"); 3936 return -ENOMEM; 3937 } 3938 plane->type = plane_type; 3939 3940 /* 3941 * HACK: IGT tests expect that the primary plane for a CRTC 3942 * can only have one possible CRTC. Only expose support for 3943 * any CRTC if they're not going to be used as a primary plane 3944 * for a CRTC - like overlay or underlay planes. 
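	 *
	 * As a purely illustrative example (assuming max_streams == 4):
	 * the primary plane created for CRTC 1 ends up with
	 * possible_crtcs == 1 << 1 == 0x2, while a plane whose plane_id is
	 * >= max_streams (an overlay or underlay) gets possible_crtcs ==
	 * 0xff and may be assigned to any CRTC.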
3945 */ 3946 possible_crtcs = 1 << plane_id; 3947 if (plane_id >= dm->dc->caps.max_streams) 3948 possible_crtcs = 0xff; 3949 3950 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap); 3951 3952 if (ret) { 3953 DRM_ERROR("KMS: Failed to initialize plane\n"); 3954 kfree(plane); 3955 return ret; 3956 } 3957 3958 if (mode_info) 3959 mode_info->planes[plane_id] = plane; 3960 3961 return ret; 3962 } 3963 3964 3965 static void register_backlight_device(struct amdgpu_display_manager *dm, 3966 struct dc_link *link) 3967 { 3968 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 3969 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 3970 3971 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && 3972 link->type != dc_connection_none) { 3973 /* 3974 * Even if registration fails, we should continue with 3975 * DM initialization because not having a backlight control 3976 * is better than a black screen. 3977 */ 3978 if (!dm->backlight_dev[dm->num_of_edps]) 3979 amdgpu_dm_register_backlight_device(dm); 3980 3981 if (dm->backlight_dev[dm->num_of_edps]) { 3982 dm->backlight_link[dm->num_of_edps] = link; 3983 dm->num_of_edps++; 3984 } 3985 } 3986 #endif 3987 } 3988 3989 3990 /* 3991 * In this architecture, the association 3992 * connector -> encoder -> crtc 3993 * is not really required. The crtc and connector will hold the 3994 * display_index as an abstraction to use with the DAL component 3995 * 3996 * Returns 0 on success 3997 */ 3998 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) 3999 { 4000 struct amdgpu_display_manager *dm = &adev->dm; 4001 int32_t i; 4002 struct amdgpu_dm_connector *aconnector = NULL; 4003 struct amdgpu_encoder *aencoder = NULL; 4004 struct amdgpu_mode_info *mode_info = &adev->mode_info; 4005 uint32_t link_cnt; 4006 int32_t primary_planes; 4007 enum dc_connection_type new_connection_type = dc_connection_none; 4008 const struct dc_plane_cap *plane; 4009 4010 dm->display_indexes_num = dm->dc->caps.max_streams; 4011 /* Update the actual used number of crtc */ 4012 adev->mode_info.num_crtc = adev->dm.display_indexes_num; 4013 4014 link_cnt = dm->dc->caps.max_links; 4015 if (amdgpu_dm_mode_config_init(dm->adev)) { 4016 DRM_ERROR("DM: Failed to initialize mode config\n"); 4017 return -EINVAL; 4018 } 4019 4020 /* There is one primary plane per CRTC */ 4021 primary_planes = dm->dc->caps.max_streams; 4022 ASSERT(primary_planes <= AMDGPU_MAX_PLANES); 4023 4024 /* 4025 * Initialize primary planes, implicit planes for legacy IOCTLs. 4026 * Order is reversed to match iteration order in atomic check. 4027 */ 4028 for (i = (primary_planes - 1); i >= 0; i--) { 4029 plane = &dm->dc->caps.planes[i]; 4030 4031 if (initialize_plane(dm, mode_info, i, 4032 DRM_PLANE_TYPE_PRIMARY, plane)) { 4033 DRM_ERROR("KMS: Failed to initialize primary plane\n"); 4034 goto fail; 4035 } 4036 } 4037 4038 /* 4039 * Initialize overlay planes, index starting after primary planes. 4040 * These planes have a higher DRM index than the primary planes since 4041 * they should be considered as having a higher z-order. 4042 * Order is reversed to match iteration order in atomic check. 4043 * 4044 * Only support DCN for now, and only expose one so we don't encourage 4045 * userspace to use up all the pipes.
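	 *
	 * Purely as an illustration of the indexing below: with e.g. 4
	 * primary planes, the single overlay that passes the capability
	 * checks is registered with plane_id primary_planes + i, sits above
	 * all primaries in z-order, and the loop breaks after creating it.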
4046 */ 4047 for (i = 0; i < dm->dc->caps.max_planes; ++i) { 4048 struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; 4049 4050 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) 4051 continue; 4052 4053 if (!plane->blends_with_above || !plane->blends_with_below) 4054 continue; 4055 4056 if (!plane->pixel_format_support.argb8888) 4057 continue; 4058 4059 if (initialize_plane(dm, NULL, primary_planes + i, 4060 DRM_PLANE_TYPE_OVERLAY, plane)) { 4061 DRM_ERROR("KMS: Failed to initialize overlay plane\n"); 4062 goto fail; 4063 } 4064 4065 /* Only create one overlay plane. */ 4066 break; 4067 } 4068 4069 for (i = 0; i < dm->dc->caps.max_streams; i++) 4070 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { 4071 DRM_ERROR("KMS: Failed to initialize crtc\n"); 4072 goto fail; 4073 } 4074 4075 #if defined(CONFIG_DRM_AMD_DC_DCN) 4076 /* Use Outbox interrupt */ 4077 switch (adev->asic_type) { 4078 case CHIP_SIENNA_CICHLID: 4079 case CHIP_NAVY_FLOUNDER: 4080 case CHIP_YELLOW_CARP: 4081 case CHIP_RENOIR: 4082 if (register_outbox_irq_handlers(dm->adev)) { 4083 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4084 goto fail; 4085 } 4086 break; 4087 default: 4088 DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type); 4089 } 4090 #endif 4091 4092 /* loops over all connectors on the board */ 4093 for (i = 0; i < link_cnt; i++) { 4094 struct dc_link *link = NULL; 4095 4096 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { 4097 DRM_ERROR( 4098 "KMS: Cannot support more than %d display indexes\n", 4099 AMDGPU_DM_MAX_DISPLAY_INDEX); 4100 continue; 4101 } 4102 4103 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); 4104 if (!aconnector) 4105 goto fail; 4106 4107 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); 4108 if (!aencoder) 4109 goto fail; 4110 4111 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { 4112 DRM_ERROR("KMS: Failed to initialize encoder\n"); 4113 goto fail; 4114 } 4115 4116 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { 4117 DRM_ERROR("KMS: Failed to initialize connector\n"); 4118 goto fail; 4119 } 4120 4121 link = dc_get_link_at_index(dm->dc, i); 4122 4123 if (!dc_link_detect_sink(link, &new_connection_type)) 4124 DRM_ERROR("KMS: Failed to detect connector\n"); 4125 4126 if (aconnector->base.force && new_connection_type == dc_connection_none) { 4127 emulated_link_detect(link); 4128 amdgpu_dm_update_connector_after_detect(aconnector); 4129 4130 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { 4131 amdgpu_dm_update_connector_after_detect(aconnector); 4132 register_backlight_device(dm, link); 4133 if (amdgpu_dc_feature_mask & DC_PSR_MASK) 4134 amdgpu_dm_set_psr_caps(link); 4135 } 4136 4137 4138 } 4139 4140 /* Software is initialized. Now we can register interrupt handlers. 
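 * Which handlers get registered depends on the ASIC in the switch below:
 * SI parts use the DCE6 handlers, Bonaire through Vega20 share the
 * DCE110 handlers, and DCN-based parts use the DCN10 handlers; an
 * unrecognized ASIC type is treated as a fatal initialization error.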
*/ 4141 switch (adev->asic_type) { 4142 #if defined(CONFIG_DRM_AMD_DC_SI) 4143 case CHIP_TAHITI: 4144 case CHIP_PITCAIRN: 4145 case CHIP_VERDE: 4146 case CHIP_OLAND: 4147 if (dce60_register_irq_handlers(dm->adev)) { 4148 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4149 goto fail; 4150 } 4151 break; 4152 #endif 4153 case CHIP_BONAIRE: 4154 case CHIP_HAWAII: 4155 case CHIP_KAVERI: 4156 case CHIP_KABINI: 4157 case CHIP_MULLINS: 4158 case CHIP_TONGA: 4159 case CHIP_FIJI: 4160 case CHIP_CARRIZO: 4161 case CHIP_STONEY: 4162 case CHIP_POLARIS11: 4163 case CHIP_POLARIS10: 4164 case CHIP_POLARIS12: 4165 case CHIP_VEGAM: 4166 case CHIP_VEGA10: 4167 case CHIP_VEGA12: 4168 case CHIP_VEGA20: 4169 if (dce110_register_irq_handlers(dm->adev)) { 4170 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4171 goto fail; 4172 } 4173 break; 4174 #if defined(CONFIG_DRM_AMD_DC_DCN) 4175 case CHIP_RAVEN: 4176 case CHIP_NAVI12: 4177 case CHIP_NAVI10: 4178 case CHIP_NAVI14: 4179 case CHIP_RENOIR: 4180 case CHIP_SIENNA_CICHLID: 4181 case CHIP_NAVY_FLOUNDER: 4182 case CHIP_DIMGREY_CAVEFISH: 4183 case CHIP_BEIGE_GOBY: 4184 case CHIP_VANGOGH: 4185 case CHIP_YELLOW_CARP: 4186 if (dcn10_register_irq_handlers(dm->adev)) { 4187 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4188 goto fail; 4189 } 4190 break; 4191 #endif 4192 default: 4193 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 4194 goto fail; 4195 } 4196 4197 return 0; 4198 fail: 4199 kfree(aencoder); 4200 kfree(aconnector); 4201 4202 return -EINVAL; 4203 } 4204 4205 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) 4206 { 4207 drm_atomic_private_obj_fini(&dm->atomic_obj); 4208 return; 4209 } 4210 4211 /****************************************************************************** 4212 * amdgpu_display_funcs functions 4213 *****************************************************************************/ 4214 4215 /* 4216 * dm_bandwidth_update - program display watermarks 4217 * 4218 * @adev: amdgpu_device pointer 4219 * 4220 * Calculate and program the display watermarks and line buffer allocation. 4221 */ 4222 static void dm_bandwidth_update(struct amdgpu_device *adev) 4223 { 4224 /* TODO: implement later */ 4225 } 4226 4227 static const struct amdgpu_display_funcs dm_display_funcs = { 4228 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ 4229 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ 4230 .backlight_set_level = NULL, /* never called for DC */ 4231 .backlight_get_level = NULL, /* never called for DC */ 4232 .hpd_sense = NULL,/* called unconditionally */ 4233 .hpd_set_polarity = NULL, /* called unconditionally */ 4234 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ 4235 .page_flip_get_scanoutpos = 4236 dm_crtc_get_scanoutpos,/* called unconditionally */ 4237 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ 4238 .add_connector = NULL, /* VBIOS parsing. DAL does it. */ 4239 }; 4240 4241 #if defined(CONFIG_DEBUG_KERNEL_DC) 4242 4243 static ssize_t s3_debug_store(struct device *device, 4244 struct device_attribute *attr, 4245 const char *buf, 4246 size_t count) 4247 { 4248 int ret; 4249 int s3_state; 4250 struct drm_device *drm_dev = dev_get_drvdata(device); 4251 struct amdgpu_device *adev = drm_to_adev(drm_dev); 4252 4253 ret = kstrtoint(buf, 0, &s3_state); 4254 4255 if (ret == 0) { 4256 if (s3_state) { 4257 dm_resume(adev); 4258 drm_kms_helper_hotplug_event(adev_to_drm(adev)); 4259 } else 4260 dm_suspend(adev); 4261 } 4262 4263 return ret == 0 ? 
count : 0; 4264 } 4265 4266 DEVICE_ATTR_WO(s3_debug); 4267 4268 #endif 4269 4270 static int dm_early_init(void *handle) 4271 { 4272 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4273 4274 switch (adev->asic_type) { 4275 #if defined(CONFIG_DRM_AMD_DC_SI) 4276 case CHIP_TAHITI: 4277 case CHIP_PITCAIRN: 4278 case CHIP_VERDE: 4279 adev->mode_info.num_crtc = 6; 4280 adev->mode_info.num_hpd = 6; 4281 adev->mode_info.num_dig = 6; 4282 break; 4283 case CHIP_OLAND: 4284 adev->mode_info.num_crtc = 2; 4285 adev->mode_info.num_hpd = 2; 4286 adev->mode_info.num_dig = 2; 4287 break; 4288 #endif 4289 case CHIP_BONAIRE: 4290 case CHIP_HAWAII: 4291 adev->mode_info.num_crtc = 6; 4292 adev->mode_info.num_hpd = 6; 4293 adev->mode_info.num_dig = 6; 4294 break; 4295 case CHIP_KAVERI: 4296 adev->mode_info.num_crtc = 4; 4297 adev->mode_info.num_hpd = 6; 4298 adev->mode_info.num_dig = 7; 4299 break; 4300 case CHIP_KABINI: 4301 case CHIP_MULLINS: 4302 adev->mode_info.num_crtc = 2; 4303 adev->mode_info.num_hpd = 6; 4304 adev->mode_info.num_dig = 6; 4305 break; 4306 case CHIP_FIJI: 4307 case CHIP_TONGA: 4308 adev->mode_info.num_crtc = 6; 4309 adev->mode_info.num_hpd = 6; 4310 adev->mode_info.num_dig = 7; 4311 break; 4312 case CHIP_CARRIZO: 4313 adev->mode_info.num_crtc = 3; 4314 adev->mode_info.num_hpd = 6; 4315 adev->mode_info.num_dig = 9; 4316 break; 4317 case CHIP_STONEY: 4318 adev->mode_info.num_crtc = 2; 4319 adev->mode_info.num_hpd = 6; 4320 adev->mode_info.num_dig = 9; 4321 break; 4322 case CHIP_POLARIS11: 4323 case CHIP_POLARIS12: 4324 adev->mode_info.num_crtc = 5; 4325 adev->mode_info.num_hpd = 5; 4326 adev->mode_info.num_dig = 5; 4327 break; 4328 case CHIP_POLARIS10: 4329 case CHIP_VEGAM: 4330 adev->mode_info.num_crtc = 6; 4331 adev->mode_info.num_hpd = 6; 4332 adev->mode_info.num_dig = 6; 4333 break; 4334 case CHIP_VEGA10: 4335 case CHIP_VEGA12: 4336 case CHIP_VEGA20: 4337 adev->mode_info.num_crtc = 6; 4338 adev->mode_info.num_hpd = 6; 4339 adev->mode_info.num_dig = 6; 4340 break; 4341 #if defined(CONFIG_DRM_AMD_DC_DCN) 4342 case CHIP_RAVEN: 4343 case CHIP_RENOIR: 4344 case CHIP_VANGOGH: 4345 adev->mode_info.num_crtc = 4; 4346 adev->mode_info.num_hpd = 4; 4347 adev->mode_info.num_dig = 4; 4348 break; 4349 case CHIP_NAVI10: 4350 case CHIP_NAVI12: 4351 case CHIP_SIENNA_CICHLID: 4352 case CHIP_NAVY_FLOUNDER: 4353 adev->mode_info.num_crtc = 6; 4354 adev->mode_info.num_hpd = 6; 4355 adev->mode_info.num_dig = 6; 4356 break; 4357 case CHIP_YELLOW_CARP: 4358 adev->mode_info.num_crtc = 4; 4359 adev->mode_info.num_hpd = 4; 4360 adev->mode_info.num_dig = 4; 4361 break; 4362 case CHIP_NAVI14: 4363 case CHIP_DIMGREY_CAVEFISH: 4364 adev->mode_info.num_crtc = 5; 4365 adev->mode_info.num_hpd = 5; 4366 adev->mode_info.num_dig = 5; 4367 break; 4368 case CHIP_BEIGE_GOBY: 4369 adev->mode_info.num_crtc = 2; 4370 adev->mode_info.num_hpd = 2; 4371 adev->mode_info.num_dig = 2; 4372 break; 4373 #endif 4374 default: 4375 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); 4376 return -EINVAL; 4377 } 4378 4379 amdgpu_dm_set_irq_funcs(adev); 4380 4381 if (adev->mode_info.funcs == NULL) 4382 adev->mode_info.funcs = &dm_display_funcs; 4383 4384 /* 4385 * Note: Do NOT change adev->audio_endpt_rreg and 4386 * adev->audio_endpt_wreg because they are initialised in 4387 * amdgpu_device_init() 4388 */ 4389 #if defined(CONFIG_DEBUG_KERNEL_DC) 4390 device_create_file( 4391 adev_to_drm(adev)->dev, 4392 &dev_attr_s3_debug); 4393 #endif 4394 4395 return 0; 4396 } 4397 4398 static bool modeset_required(struct 
drm_crtc_state *crtc_state, 4399 struct dc_stream_state *new_stream, 4400 struct dc_stream_state *old_stream) 4401 { 4402 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4403 } 4404 4405 static bool modereset_required(struct drm_crtc_state *crtc_state) 4406 { 4407 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); 4408 } 4409 4410 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 4411 { 4412 drm_encoder_cleanup(encoder); 4413 kfree(encoder); 4414 } 4415 4416 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { 4417 .destroy = amdgpu_dm_encoder_destroy, 4418 }; 4419 4420 4421 static void get_min_max_dc_plane_scaling(struct drm_device *dev, 4422 struct drm_framebuffer *fb, 4423 int *min_downscale, int *max_upscale) 4424 { 4425 struct amdgpu_device *adev = drm_to_adev(dev); 4426 struct dc *dc = adev->dm.dc; 4427 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ 4428 struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; 4429 4430 switch (fb->format->format) { 4431 case DRM_FORMAT_P010: 4432 case DRM_FORMAT_NV12: 4433 case DRM_FORMAT_NV21: 4434 *max_upscale = plane_cap->max_upscale_factor.nv12; 4435 *min_downscale = plane_cap->max_downscale_factor.nv12; 4436 break; 4437 4438 case DRM_FORMAT_XRGB16161616F: 4439 case DRM_FORMAT_ARGB16161616F: 4440 case DRM_FORMAT_XBGR16161616F: 4441 case DRM_FORMAT_ABGR16161616F: 4442 *max_upscale = plane_cap->max_upscale_factor.fp16; 4443 *min_downscale = plane_cap->max_downscale_factor.fp16; 4444 break; 4445 4446 default: 4447 *max_upscale = plane_cap->max_upscale_factor.argb8888; 4448 *min_downscale = plane_cap->max_downscale_factor.argb8888; 4449 break; 4450 } 4451 4452 /* 4453 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a 4454 * scaling factor of 1.0 == 1000 units. 4455 */ 4456 if (*max_upscale == 1) 4457 *max_upscale = 1000; 4458 4459 if (*min_downscale == 1) 4460 *min_downscale = 1000; 4461 } 4462 4463 4464 static int fill_dc_scaling_info(const struct drm_plane_state *state, 4465 struct dc_scaling_info *scaling_info) 4466 { 4467 int scale_w, scale_h, min_downscale, max_upscale; 4468 4469 memset(scaling_info, 0, sizeof(*scaling_info)); 4470 4471 /* Source is fixed 16.16 but we ignore mantissa for now... */ 4472 scaling_info->src_rect.x = state->src_x >> 16; 4473 scaling_info->src_rect.y = state->src_y >> 16; 4474 4475 /* 4476 * For reasons we don't (yet) fully understand a non-zero 4477 * src_y coordinate into an NV12 buffer can cause a 4478 * system hang. To avoid hangs (and maybe be overly cautious) 4479 * let's reject both non-zero src_x and src_y. 4480 * 4481 * We currently know of only one use-case to reproduce a 4482 * scenario with non-zero src_x and src_y for NV12, which 4483 * is to gesture the YouTube Android app into full screen 4484 * on ChromeOS. 
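	 *
	 * In practice this means the source rectangle of an NV12
	 * framebuffer must start at (0, 0); any non-zero source offset is
	 * rejected below with -EINVAL.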
4485 */ 4486 if (state->fb && 4487 state->fb->format->format == DRM_FORMAT_NV12 && 4488 (scaling_info->src_rect.x != 0 || 4489 scaling_info->src_rect.y != 0)) 4490 return -EINVAL; 4491 4492 scaling_info->src_rect.width = state->src_w >> 16; 4493 if (scaling_info->src_rect.width == 0) 4494 return -EINVAL; 4495 4496 scaling_info->src_rect.height = state->src_h >> 16; 4497 if (scaling_info->src_rect.height == 0) 4498 return -EINVAL; 4499 4500 scaling_info->dst_rect.x = state->crtc_x; 4501 scaling_info->dst_rect.y = state->crtc_y; 4502 4503 if (state->crtc_w == 0) 4504 return -EINVAL; 4505 4506 scaling_info->dst_rect.width = state->crtc_w; 4507 4508 if (state->crtc_h == 0) 4509 return -EINVAL; 4510 4511 scaling_info->dst_rect.height = state->crtc_h; 4512 4513 /* DRM doesn't specify clipping on destination output. */ 4514 scaling_info->clip_rect = scaling_info->dst_rect; 4515 4516 /* Validate scaling per-format with DC plane caps */ 4517 if (state->plane && state->plane->dev && state->fb) { 4518 get_min_max_dc_plane_scaling(state->plane->dev, state->fb, 4519 &min_downscale, &max_upscale); 4520 } else { 4521 min_downscale = 250; 4522 max_upscale = 16000; 4523 } 4524 4525 scale_w = scaling_info->dst_rect.width * 1000 / 4526 scaling_info->src_rect.width; 4527 4528 if (scale_w < min_downscale || scale_w > max_upscale) 4529 return -EINVAL; 4530 4531 scale_h = scaling_info->dst_rect.height * 1000 / 4532 scaling_info->src_rect.height; 4533 4534 if (scale_h < min_downscale || scale_h > max_upscale) 4535 return -EINVAL; 4536 4537 /* 4538 * The "scaling_quality" can be ignored for now, quality = 0 has DC 4539 * assume reasonable defaults based on the format. 4540 */ 4541 4542 return 0; 4543 } 4544 4545 static void 4546 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, 4547 uint64_t tiling_flags) 4548 { 4549 /* Fill GFX8 params */ 4550 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { 4551 unsigned int bankw, bankh, mtaspect, tile_split, num_banks; 4552 4553 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 4554 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 4555 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 4556 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 4557 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 4558 4559 /* XXX fix me for VI */ 4560 tiling_info->gfx8.num_banks = num_banks; 4561 tiling_info->gfx8.array_mode = 4562 DC_ARRAY_2D_TILED_THIN1; 4563 tiling_info->gfx8.tile_split = tile_split; 4564 tiling_info->gfx8.bank_width = bankw; 4565 tiling_info->gfx8.bank_height = bankh; 4566 tiling_info->gfx8.tile_aspect = mtaspect; 4567 tiling_info->gfx8.tile_mode = 4568 DC_ADDR_SURF_MICRO_TILING_DISPLAY; 4569 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) 4570 == DC_ARRAY_1D_TILED_THIN1) { 4571 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; 4572 } 4573 4574 tiling_info->gfx8.pipe_config = 4575 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 4576 } 4577 4578 static void 4579 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, 4580 union dc_tiling_info *tiling_info) 4581 { 4582 tiling_info->gfx9.num_pipes = 4583 adev->gfx.config.gb_addr_config_fields.num_pipes; 4584 tiling_info->gfx9.num_banks = 4585 adev->gfx.config.gb_addr_config_fields.num_banks; 4586 tiling_info->gfx9.pipe_interleave = 4587 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; 4588 tiling_info->gfx9.num_shader_engines = 4589 adev->gfx.config.gb_addr_config_fields.num_se; 4590 
tiling_info->gfx9.max_compressed_frags = 4591 adev->gfx.config.gb_addr_config_fields.max_compress_frags; 4592 tiling_info->gfx9.num_rb_per_se = 4593 adev->gfx.config.gb_addr_config_fields.num_rb_per_se; 4594 tiling_info->gfx9.shaderEnable = 1; 4595 if (adev->asic_type == CHIP_SIENNA_CICHLID || 4596 adev->asic_type == CHIP_NAVY_FLOUNDER || 4597 adev->asic_type == CHIP_DIMGREY_CAVEFISH || 4598 adev->asic_type == CHIP_BEIGE_GOBY || 4599 adev->asic_type == CHIP_YELLOW_CARP || 4600 adev->asic_type == CHIP_VANGOGH) 4601 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; 4602 } 4603 4604 static int 4605 validate_dcc(struct amdgpu_device *adev, 4606 const enum surface_pixel_format format, 4607 const enum dc_rotation_angle rotation, 4608 const union dc_tiling_info *tiling_info, 4609 const struct dc_plane_dcc_param *dcc, 4610 const struct dc_plane_address *address, 4611 const struct plane_size *plane_size) 4612 { 4613 struct dc *dc = adev->dm.dc; 4614 struct dc_dcc_surface_param input; 4615 struct dc_surface_dcc_cap output; 4616 4617 memset(&input, 0, sizeof(input)); 4618 memset(&output, 0, sizeof(output)); 4619 4620 if (!dcc->enable) 4621 return 0; 4622 4623 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || 4624 !dc->cap_funcs.get_dcc_compression_cap) 4625 return -EINVAL; 4626 4627 input.format = format; 4628 input.surface_size.width = plane_size->surface_size.width; 4629 input.surface_size.height = plane_size->surface_size.height; 4630 input.swizzle_mode = tiling_info->gfx9.swizzle; 4631 4632 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) 4633 input.scan = SCAN_DIRECTION_HORIZONTAL; 4634 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) 4635 input.scan = SCAN_DIRECTION_VERTICAL; 4636 4637 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) 4638 return -EINVAL; 4639 4640 if (!output.capable) 4641 return -EINVAL; 4642 4643 if (dcc->independent_64b_blks == 0 && 4644 output.grph.rgb.independent_64b_blks != 0) 4645 return -EINVAL; 4646 4647 return 0; 4648 } 4649 4650 static bool 4651 modifier_has_dcc(uint64_t modifier) 4652 { 4653 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); 4654 } 4655 4656 static unsigned 4657 modifier_gfx9_swizzle_mode(uint64_t modifier) 4658 { 4659 if (modifier == DRM_FORMAT_MOD_LINEAR) 4660 return 0; 4661 4662 return AMD_FMT_MOD_GET(TILE, modifier); 4663 } 4664 4665 static const struct drm_format_info * 4666 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 4667 { 4668 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); 4669 } 4670 4671 static void 4672 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, 4673 union dc_tiling_info *tiling_info, 4674 uint64_t modifier) 4675 { 4676 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); 4677 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); 4678 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); 4679 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits); 4680 4681 fill_gfx9_tiling_info_from_device(adev, tiling_info); 4682 4683 if (!IS_AMD_FMT_MOD(modifier)) 4684 return; 4685 4686 tiling_info->gfx9.num_pipes = 1u << pipes_log2; 4687 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); 4688 4689 if (adev->family >= AMDGPU_FAMILY_NV) { 4690 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; 4691 } else { 4692 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; 4693 4694 /* for DCC we know it isn't rb aligned, so 
rb_per_se doesn't matter. */ 4695 } 4696 } 4697 4698 enum dm_micro_swizzle { 4699 MICRO_SWIZZLE_Z = 0, 4700 MICRO_SWIZZLE_S = 1, 4701 MICRO_SWIZZLE_D = 2, 4702 MICRO_SWIZZLE_R = 3 4703 }; 4704 4705 static bool dm_plane_format_mod_supported(struct drm_plane *plane, 4706 uint32_t format, 4707 uint64_t modifier) 4708 { 4709 struct amdgpu_device *adev = drm_to_adev(plane->dev); 4710 const struct drm_format_info *info = drm_format_info(format); 4711 int i; 4712 4713 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; 4714 4715 if (!info) 4716 return false; 4717 4718 /* 4719 * We always have to allow these modifiers: 4720 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 4721 * 2. Not passing any modifiers is the same as explicitly passing INVALID. 4722 */ 4723 if (modifier == DRM_FORMAT_MOD_LINEAR || 4724 modifier == DRM_FORMAT_MOD_INVALID) { 4725 return true; 4726 } 4727 4728 /* Check that the modifier is on the list of the plane's supported modifiers. */ 4729 for (i = 0; i < plane->modifier_count; i++) { 4730 if (modifier == plane->modifiers[i]) 4731 break; 4732 } 4733 if (i == plane->modifier_count) 4734 return false; 4735 4736 /* 4737 * For D swizzle the canonical modifier depends on the bpp, so check 4738 * it here. 4739 */ 4740 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && 4741 adev->family >= AMDGPU_FAMILY_NV) { 4742 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) 4743 return false; 4744 } 4745 4746 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && 4747 info->cpp[0] < 8) 4748 return false; 4749 4750 if (modifier_has_dcc(modifier)) { 4751 /* Per radeonsi comments 16/64 bpp are more complicated. */ 4752 if (info->cpp[0] != 4) 4753 return false; 4754 /* We support multi-planar formats, but not when combined with 4755 * additional DCC metadata planes. 
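	 * Together with the 32bpp (cpp == 4) check above, this means only
	 * single-plane 32bpp formats such as ARGB8888/XRGB8888 are accepted
	 * in combination with DCC modifiers.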
*/ 4756 if (info->num_planes > 1) 4757 return false; 4758 } 4759 4760 return true; 4761 } 4762 4763 static void 4764 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) 4765 { 4766 if (!*mods) 4767 return; 4768 4769 if (*cap - *size < 1) { 4770 uint64_t new_cap = *cap * 2; 4771 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 4772 4773 if (!new_mods) { 4774 kfree(*mods); 4775 *mods = NULL; 4776 return; 4777 } 4778 4779 memcpy(new_mods, *mods, sizeof(uint64_t) * *size); 4780 kfree(*mods); 4781 *mods = new_mods; 4782 *cap = new_cap; 4783 } 4784 4785 (*mods)[*size] = mod; 4786 *size += 1; 4787 } 4788 4789 static void 4790 add_gfx9_modifiers(const struct amdgpu_device *adev, 4791 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4792 { 4793 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4794 int pipe_xor_bits = min(8, pipes + 4795 ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); 4796 int bank_xor_bits = min(8 - pipe_xor_bits, 4797 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); 4798 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + 4799 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); 4800 4801 4802 if (adev->family == AMDGPU_FAMILY_RV) { 4803 /* Raven2 and later */ 4804 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; 4805 4806 /* 4807 * No _D DCC swizzles yet because we only allow 32bpp, which 4808 * doesn't support _D on DCN 4809 */ 4810 4811 if (has_constant_encode) { 4812 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4813 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4814 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4815 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4816 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4817 AMD_FMT_MOD_SET(DCC, 1) | 4818 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4819 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4820 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); 4821 } 4822 4823 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4824 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4825 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4826 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4827 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4828 AMD_FMT_MOD_SET(DCC, 1) | 4829 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4830 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4831 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); 4832 4833 if (has_constant_encode) { 4834 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4835 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4836 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4837 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4838 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4839 AMD_FMT_MOD_SET(DCC, 1) | 4840 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4841 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4842 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4843 4844 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4845 AMD_FMT_MOD_SET(RB, rb) | 4846 AMD_FMT_MOD_SET(PIPE, pipes)); 4847 } 4848 4849 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4850 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4851 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4852 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4853 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | 4854 AMD_FMT_MOD_SET(DCC, 1) | 4855 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4856 
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4857 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | 4858 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | 4859 AMD_FMT_MOD_SET(RB, rb) | 4860 AMD_FMT_MOD_SET(PIPE, pipes)); 4861 } 4862 4863 /* 4864 * Only supported for 64bpp on Raven, will be filtered on format in 4865 * dm_plane_format_mod_supported. 4866 */ 4867 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4868 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | 4869 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4870 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4871 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 4872 4873 if (adev->family == AMDGPU_FAMILY_RV) { 4874 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4875 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4876 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | 4877 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4878 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); 4879 } 4880 4881 /* 4882 * Only supported for 64bpp on Raven, will be filtered on format in 4883 * dm_plane_format_mod_supported. 4884 */ 4885 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4886 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 4887 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4888 4889 if (adev->family == AMDGPU_FAMILY_RV) { 4890 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4891 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 4892 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4893 } 4894 } 4895 4896 static void 4897 add_gfx10_1_modifiers(const struct amdgpu_device *adev, 4898 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4899 { 4900 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4901 4902 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4903 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4904 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4905 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4906 AMD_FMT_MOD_SET(DCC, 1) | 4907 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4908 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4909 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4910 4911 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4912 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4913 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4914 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4915 AMD_FMT_MOD_SET(DCC, 1) | 4916 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4917 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4918 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4919 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4920 4921 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4922 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4923 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4924 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 4925 4926 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4927 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 4928 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | 4929 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); 4930 4931 4932 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 4933 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4934 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 4935 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4936 4937 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4938 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 4939 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 4940 } 4941 4942 static void 4943 add_gfx10_3_modifiers(const struct amdgpu_device *adev, 4944 uint64_t **mods, uint64_t *size, uint64_t *capacity) 4945 { 4946 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); 4947 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); 4948 4949 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4950 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4951 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4952 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4953 AMD_FMT_MOD_SET(PACKERS, pkrs) | 4954 AMD_FMT_MOD_SET(DCC, 1) | 4955 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4956 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4957 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4958 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4959 4960 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4961 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4962 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4963 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4964 AMD_FMT_MOD_SET(PACKERS, pkrs) | 4965 AMD_FMT_MOD_SET(DCC, 1) | 4966 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4967 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4968 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 4969 4970 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4971 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4972 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4973 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4974 AMD_FMT_MOD_SET(PACKERS, pkrs) | 4975 AMD_FMT_MOD_SET(DCC, 1) | 4976 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4977 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4978 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4979 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4980 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4981 4982 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4983 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4984 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4985 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4986 AMD_FMT_MOD_SET(PACKERS, pkrs) | 4987 AMD_FMT_MOD_SET(DCC, 1) | 4988 AMD_FMT_MOD_SET(DCC_RETILE, 1) | 4989 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4990 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4991 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 4992 4993 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4994 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | 4995 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 4996 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 4997 AMD_FMT_MOD_SET(PACKERS, pkrs)); 4998 4999 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5000 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | 5001 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | 5002 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | 5003 AMD_FMT_MOD_SET(PACKERS, pkrs)); 5004 5005 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ 5006 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5007 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | 5008 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5009 5010 add_modifier(mods, size, capacity, AMD_FMT_MOD | 5011 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | 5012 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); 5013 } 5014 5015 static int 5016 get_plane_modifiers(const struct 
amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) 5017 { 5018 uint64_t size = 0, capacity = 128; 5019 *mods = NULL; 5020 5021 /* We have not hooked up any pre-GFX9 modifiers. */ 5022 if (adev->family < AMDGPU_FAMILY_AI) 5023 return 0; 5024 5025 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 5026 5027 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 5028 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 5029 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 5030 return *mods ? 0 : -ENOMEM; 5031 } 5032 5033 switch (adev->family) { 5034 case AMDGPU_FAMILY_AI: 5035 case AMDGPU_FAMILY_RV: 5036 add_gfx9_modifiers(adev, mods, &size, &capacity); 5037 break; 5038 case AMDGPU_FAMILY_NV: 5039 case AMDGPU_FAMILY_VGH: 5040 case AMDGPU_FAMILY_YC: 5041 if (adev->asic_type >= CHIP_SIENNA_CICHLID) 5042 add_gfx10_3_modifiers(adev, mods, &size, &capacity); 5043 else 5044 add_gfx10_1_modifiers(adev, mods, &size, &capacity); 5045 break; 5046 } 5047 5048 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); 5049 5050 /* INVALID marks the end of the list. */ 5051 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); 5052 5053 if (!*mods) 5054 return -ENOMEM; 5055 5056 return 0; 5057 } 5058 5059 static int 5060 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, 5061 const struct amdgpu_framebuffer *afb, 5062 const enum surface_pixel_format format, 5063 const enum dc_rotation_angle rotation, 5064 const struct plane_size *plane_size, 5065 union dc_tiling_info *tiling_info, 5066 struct dc_plane_dcc_param *dcc, 5067 struct dc_plane_address *address, 5068 const bool force_disable_dcc) 5069 { 5070 const uint64_t modifier = afb->base.modifier; 5071 int ret = 0; 5072 5073 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); 5074 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); 5075 5076 if (modifier_has_dcc(modifier) && !force_disable_dcc) { 5077 uint64_t dcc_address = afb->address + afb->base.offsets[1]; 5078 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); 5079 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); 5080 5081 dcc->enable = 1; 5082 dcc->meta_pitch = afb->base.pitches[1]; 5083 dcc->independent_64b_blks = independent_64b_blks; 5084 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { 5085 if (independent_64b_blks && independent_128b_blks) 5086 dcc->dcc_ind_blk = hubp_ind_block_64b; 5087 else if (independent_128b_blks) 5088 dcc->dcc_ind_blk = hubp_ind_block_128b; 5089 else if (independent_64b_blks && !independent_128b_blks) 5090 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; 5091 else 5092 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 5093 } else { 5094 if (independent_64b_blks) 5095 dcc->dcc_ind_blk = hubp_ind_block_64b; 5096 else 5097 dcc->dcc_ind_blk = hubp_ind_block_unconstrained; 5098 } 5099 5100 address->grph.meta_addr.low_part = lower_32_bits(dcc_address); 5101 address->grph.meta_addr.high_part = upper_32_bits(dcc_address); 5102 } 5103 5104 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); 5105 if (ret) 5106 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); 5107 5108 return ret; 5109 } 5110 5111 static int 5112 fill_plane_buffer_attributes(struct amdgpu_device *adev, 5113 const struct amdgpu_framebuffer *afb, 5114 const enum surface_pixel_format format, 5115 const enum dc_rotation_angle rotation, 5116 const uint64_t tiling_flags, 5117 union dc_tiling_info 
*tiling_info, 5118 struct plane_size *plane_size, 5119 struct dc_plane_dcc_param *dcc, 5120 struct dc_plane_address *address, 5121 bool tmz_surface, 5122 bool force_disable_dcc) 5123 { 5124 const struct drm_framebuffer *fb = &afb->base; 5125 int ret; 5126 5127 memset(tiling_info, 0, sizeof(*tiling_info)); 5128 memset(plane_size, 0, sizeof(*plane_size)); 5129 memset(dcc, 0, sizeof(*dcc)); 5130 memset(address, 0, sizeof(*address)); 5131 5132 address->tmz_surface = tmz_surface; 5133 5134 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { 5135 uint64_t addr = afb->address + fb->offsets[0]; 5136 5137 plane_size->surface_size.x = 0; 5138 plane_size->surface_size.y = 0; 5139 plane_size->surface_size.width = fb->width; 5140 plane_size->surface_size.height = fb->height; 5141 plane_size->surface_pitch = 5142 fb->pitches[0] / fb->format->cpp[0]; 5143 5144 address->type = PLN_ADDR_TYPE_GRAPHICS; 5145 address->grph.addr.low_part = lower_32_bits(addr); 5146 address->grph.addr.high_part = upper_32_bits(addr); 5147 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { 5148 uint64_t luma_addr = afb->address + fb->offsets[0]; 5149 uint64_t chroma_addr = afb->address + fb->offsets[1]; 5150 5151 plane_size->surface_size.x = 0; 5152 plane_size->surface_size.y = 0; 5153 plane_size->surface_size.width = fb->width; 5154 plane_size->surface_size.height = fb->height; 5155 plane_size->surface_pitch = 5156 fb->pitches[0] / fb->format->cpp[0]; 5157 5158 plane_size->chroma_size.x = 0; 5159 plane_size->chroma_size.y = 0; 5160 /* TODO: set these based on surface format */ 5161 plane_size->chroma_size.width = fb->width / 2; 5162 plane_size->chroma_size.height = fb->height / 2; 5163 5164 plane_size->chroma_pitch = 5165 fb->pitches[1] / fb->format->cpp[1]; 5166 5167 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; 5168 address->video_progressive.luma_addr.low_part = 5169 lower_32_bits(luma_addr); 5170 address->video_progressive.luma_addr.high_part = 5171 upper_32_bits(luma_addr); 5172 address->video_progressive.chroma_addr.low_part = 5173 lower_32_bits(chroma_addr); 5174 address->video_progressive.chroma_addr.high_part = 5175 upper_32_bits(chroma_addr); 5176 } 5177 5178 if (adev->family >= AMDGPU_FAMILY_AI) { 5179 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, 5180 rotation, plane_size, 5181 tiling_info, dcc, 5182 address, 5183 force_disable_dcc); 5184 if (ret) 5185 return ret; 5186 } else { 5187 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); 5188 } 5189 5190 return 0; 5191 } 5192 5193 static void 5194 fill_blending_from_plane_state(const struct drm_plane_state *plane_state, 5195 bool *per_pixel_alpha, bool *global_alpha, 5196 int *global_alpha_value) 5197 { 5198 *per_pixel_alpha = false; 5199 *global_alpha = false; 5200 *global_alpha_value = 0xff; 5201 5202 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) 5203 return; 5204 5205 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) { 5206 static const uint32_t alpha_formats[] = { 5207 DRM_FORMAT_ARGB8888, 5208 DRM_FORMAT_RGBA8888, 5209 DRM_FORMAT_ABGR8888, 5210 }; 5211 uint32_t format = plane_state->fb->format->format; 5212 unsigned int i; 5213 5214 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { 5215 if (format == alpha_formats[i]) { 5216 *per_pixel_alpha = true; 5217 break; 5218 } 5219 } 5220 } 5221 5222 if (plane_state->alpha < 0xffff) { 5223 *global_alpha = true; 5224 *global_alpha_value = plane_state->alpha >> 8; 5225 } 5226 } 5227 5228 static int 5229 fill_plane_color_attributes(const struct drm_plane_state *plane_state, 
5230 const enum surface_pixel_format format, 5231 enum dc_color_space *color_space) 5232 { 5233 bool full_range; 5234 5235 *color_space = COLOR_SPACE_SRGB; 5236 5237 /* DRM color properties only affect non-RGB formats. */ 5238 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 5239 return 0; 5240 5241 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); 5242 5243 switch (plane_state->color_encoding) { 5244 case DRM_COLOR_YCBCR_BT601: 5245 if (full_range) 5246 *color_space = COLOR_SPACE_YCBCR601; 5247 else 5248 *color_space = COLOR_SPACE_YCBCR601_LIMITED; 5249 break; 5250 5251 case DRM_COLOR_YCBCR_BT709: 5252 if (full_range) 5253 *color_space = COLOR_SPACE_YCBCR709; 5254 else 5255 *color_space = COLOR_SPACE_YCBCR709_LIMITED; 5256 break; 5257 5258 case DRM_COLOR_YCBCR_BT2020: 5259 if (full_range) 5260 *color_space = COLOR_SPACE_2020_YCBCR; 5261 else 5262 return -EINVAL; 5263 break; 5264 5265 default: 5266 return -EINVAL; 5267 } 5268 5269 return 0; 5270 } 5271 5272 static int 5273 fill_dc_plane_info_and_addr(struct amdgpu_device *adev, 5274 const struct drm_plane_state *plane_state, 5275 const uint64_t tiling_flags, 5276 struct dc_plane_info *plane_info, 5277 struct dc_plane_address *address, 5278 bool tmz_surface, 5279 bool force_disable_dcc) 5280 { 5281 const struct drm_framebuffer *fb = plane_state->fb; 5282 const struct amdgpu_framebuffer *afb = 5283 to_amdgpu_framebuffer(plane_state->fb); 5284 int ret; 5285 5286 memset(plane_info, 0, sizeof(*plane_info)); 5287 5288 switch (fb->format->format) { 5289 case DRM_FORMAT_C8: 5290 plane_info->format = 5291 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; 5292 break; 5293 case DRM_FORMAT_RGB565: 5294 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; 5295 break; 5296 case DRM_FORMAT_XRGB8888: 5297 case DRM_FORMAT_ARGB8888: 5298 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; 5299 break; 5300 case DRM_FORMAT_XRGB2101010: 5301 case DRM_FORMAT_ARGB2101010: 5302 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; 5303 break; 5304 case DRM_FORMAT_XBGR2101010: 5305 case DRM_FORMAT_ABGR2101010: 5306 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; 5307 break; 5308 case DRM_FORMAT_XBGR8888: 5309 case DRM_FORMAT_ABGR8888: 5310 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; 5311 break; 5312 case DRM_FORMAT_NV21: 5313 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; 5314 break; 5315 case DRM_FORMAT_NV12: 5316 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; 5317 break; 5318 case DRM_FORMAT_P010: 5319 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; 5320 break; 5321 case DRM_FORMAT_XRGB16161616F: 5322 case DRM_FORMAT_ARGB16161616F: 5323 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; 5324 break; 5325 case DRM_FORMAT_XBGR16161616F: 5326 case DRM_FORMAT_ABGR16161616F: 5327 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; 5328 break; 5329 case DRM_FORMAT_XRGB16161616: 5330 case DRM_FORMAT_ARGB16161616: 5331 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; 5332 break; 5333 case DRM_FORMAT_XBGR16161616: 5334 case DRM_FORMAT_ABGR16161616: 5335 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616; 5336 break; 5337 default: 5338 DRM_ERROR( 5339 "Unsupported screen format %p4cc\n", 5340 &fb->format->format); 5341 return -EINVAL; 5342 } 5343 5344 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { 5345 case DRM_MODE_ROTATE_0: 5346 plane_info->rotation = ROTATION_ANGLE_0; 5347 break; 5348 case DRM_MODE_ROTATE_90: 5349 
plane_info->rotation = ROTATION_ANGLE_90; 5350 break; 5351 case DRM_MODE_ROTATE_180: 5352 plane_info->rotation = ROTATION_ANGLE_180; 5353 break; 5354 case DRM_MODE_ROTATE_270: 5355 plane_info->rotation = ROTATION_ANGLE_270; 5356 break; 5357 default: 5358 plane_info->rotation = ROTATION_ANGLE_0; 5359 break; 5360 } 5361 5362 plane_info->visible = true; 5363 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; 5364 5365 plane_info->layer_index = 0; 5366 5367 ret = fill_plane_color_attributes(plane_state, plane_info->format, 5368 &plane_info->color_space); 5369 if (ret) 5370 return ret; 5371 5372 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format, 5373 plane_info->rotation, tiling_flags, 5374 &plane_info->tiling_info, 5375 &plane_info->plane_size, 5376 &plane_info->dcc, address, tmz_surface, 5377 force_disable_dcc); 5378 if (ret) 5379 return ret; 5380 5381 fill_blending_from_plane_state( 5382 plane_state, &plane_info->per_pixel_alpha, 5383 &plane_info->global_alpha, &plane_info->global_alpha_value); 5384 5385 return 0; 5386 } 5387 5388 static int fill_dc_plane_attributes(struct amdgpu_device *adev, 5389 struct dc_plane_state *dc_plane_state, 5390 struct drm_plane_state *plane_state, 5391 struct drm_crtc_state *crtc_state) 5392 { 5393 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 5394 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb; 5395 struct dc_scaling_info scaling_info; 5396 struct dc_plane_info plane_info; 5397 int ret; 5398 bool force_disable_dcc = false; 5399 5400 ret = fill_dc_scaling_info(plane_state, &scaling_info); 5401 if (ret) 5402 return ret; 5403 5404 dc_plane_state->src_rect = scaling_info.src_rect; 5405 dc_plane_state->dst_rect = scaling_info.dst_rect; 5406 dc_plane_state->clip_rect = scaling_info.clip_rect; 5407 dc_plane_state->scaling_quality = scaling_info.scaling_quality; 5408 5409 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5410 ret = fill_dc_plane_info_and_addr(adev, plane_state, 5411 afb->tiling_flags, 5412 &plane_info, 5413 &dc_plane_state->address, 5414 afb->tmz_surface, 5415 force_disable_dcc); 5416 if (ret) 5417 return ret; 5418 5419 dc_plane_state->format = plane_info.format; 5420 dc_plane_state->color_space = plane_info.color_space; 5421 dc_plane_state->format = plane_info.format; 5422 dc_plane_state->plane_size = plane_info.plane_size; 5423 dc_plane_state->rotation = plane_info.rotation; 5424 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror; 5425 dc_plane_state->stereo_format = plane_info.stereo_format; 5426 dc_plane_state->tiling_info = plane_info.tiling_info; 5427 dc_plane_state->visible = plane_info.visible; 5428 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha; 5429 dc_plane_state->global_alpha = plane_info.global_alpha; 5430 dc_plane_state->global_alpha_value = plane_info.global_alpha_value; 5431 dc_plane_state->dcc = plane_info.dcc; 5432 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 5433 dc_plane_state->flip_int_enabled = true; 5434 5435 /* 5436 * Always set input transfer function, since plane state is refreshed 5437 * every time. 
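	 * (The call below re-derives the plane's input/degamma transfer
	 * function from the current CRTC color management state instead of
	 * reusing anything from a previous plane state.)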
5438 */ 5439 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state); 5440 if (ret) 5441 return ret; 5442 5443 return 0; 5444 } 5445 5446 static void update_stream_scaling_settings(const struct drm_display_mode *mode, 5447 const struct dm_connector_state *dm_state, 5448 struct dc_stream_state *stream) 5449 { 5450 enum amdgpu_rmx_type rmx_type; 5451 5452 struct rect src = { 0 }; /* viewport in composition space*/ 5453 struct rect dst = { 0 }; /* stream addressable area */ 5454 5455 /* no mode. nothing to be done */ 5456 if (!mode) 5457 return; 5458 5459 /* Full screen scaling by default */ 5460 src.width = mode->hdisplay; 5461 src.height = mode->vdisplay; 5462 dst.width = stream->timing.h_addressable; 5463 dst.height = stream->timing.v_addressable; 5464 5465 if (dm_state) { 5466 rmx_type = dm_state->scaling; 5467 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { 5468 if (src.width * dst.height < 5469 src.height * dst.width) { 5470 /* height needs less upscaling/more downscaling */ 5471 dst.width = src.width * 5472 dst.height / src.height; 5473 } else { 5474 /* width needs less upscaling/more downscaling */ 5475 dst.height = src.height * 5476 dst.width / src.width; 5477 } 5478 } else if (rmx_type == RMX_CENTER) { 5479 dst = src; 5480 } 5481 5482 dst.x = (stream->timing.h_addressable - dst.width) / 2; 5483 dst.y = (stream->timing.v_addressable - dst.height) / 2; 5484 5485 if (dm_state->underscan_enable) { 5486 dst.x += dm_state->underscan_hborder / 2; 5487 dst.y += dm_state->underscan_vborder / 2; 5488 dst.width -= dm_state->underscan_hborder; 5489 dst.height -= dm_state->underscan_vborder; 5490 } 5491 } 5492 5493 stream->src = src; 5494 stream->dst = dst; 5495 5496 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n", 5497 dst.x, dst.y, dst.width, dst.height); 5498 5499 } 5500 5501 static enum dc_color_depth 5502 convert_color_depth_from_display_info(const struct drm_connector *connector, 5503 bool is_y420, int requested_bpc) 5504 { 5505 uint8_t bpc; 5506 5507 if (is_y420) { 5508 bpc = 8; 5509 5510 /* Cap display bpc based on HDMI 2.0 HF-VSDB */ 5511 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48) 5512 bpc = 16; 5513 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36) 5514 bpc = 12; 5515 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30) 5516 bpc = 10; 5517 } else { 5518 bpc = (uint8_t)connector->display_info.bpc; 5519 /* Assume 8 bpc by default if no bpc is specified. */ 5520 bpc = bpc ? bpc : 8; 5521 } 5522 5523 if (requested_bpc > 0) { 5524 /* 5525 * Cap display bpc based on the user requested value. 5526 * 5527 * The value for state->max_bpc may not correctly updated 5528 * depending on when the connector gets added to the state 5529 * or if this was called outside of atomic check, so it 5530 * can't be used directly. 5531 */ 5532 bpc = min_t(u8, bpc, requested_bpc); 5533 5534 /* Round down to the nearest even number. 
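	 * (e.g. a requested cap of 11 bpc is treated as 10 bpc, since odd
	 * values are not valid display color depths)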
*/ 5535 bpc = bpc - (bpc & 1); 5536 } 5537 5538 switch (bpc) { 5539 case 0: 5540 /* 5541 * Temporary Work around, DRM doesn't parse color depth for 5542 * EDID revision before 1.4 5543 * TODO: Fix edid parsing 5544 */ 5545 return COLOR_DEPTH_888; 5546 case 6: 5547 return COLOR_DEPTH_666; 5548 case 8: 5549 return COLOR_DEPTH_888; 5550 case 10: 5551 return COLOR_DEPTH_101010; 5552 case 12: 5553 return COLOR_DEPTH_121212; 5554 case 14: 5555 return COLOR_DEPTH_141414; 5556 case 16: 5557 return COLOR_DEPTH_161616; 5558 default: 5559 return COLOR_DEPTH_UNDEFINED; 5560 } 5561 } 5562 5563 static enum dc_aspect_ratio 5564 get_aspect_ratio(const struct drm_display_mode *mode_in) 5565 { 5566 /* 1-1 mapping, since both enums follow the HDMI spec. */ 5567 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; 5568 } 5569 5570 static enum dc_color_space 5571 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) 5572 { 5573 enum dc_color_space color_space = COLOR_SPACE_SRGB; 5574 5575 switch (dc_crtc_timing->pixel_encoding) { 5576 case PIXEL_ENCODING_YCBCR422: 5577 case PIXEL_ENCODING_YCBCR444: 5578 case PIXEL_ENCODING_YCBCR420: 5579 { 5580 /* 5581 * 27030khz is the separation point between HDTV and SDTV 5582 * according to HDMI spec, we use YCbCr709 and YCbCr601 5583 * respectively 5584 */ 5585 if (dc_crtc_timing->pix_clk_100hz > 270300) { 5586 if (dc_crtc_timing->flags.Y_ONLY) 5587 color_space = 5588 COLOR_SPACE_YCBCR709_LIMITED; 5589 else 5590 color_space = COLOR_SPACE_YCBCR709; 5591 } else { 5592 if (dc_crtc_timing->flags.Y_ONLY) 5593 color_space = 5594 COLOR_SPACE_YCBCR601_LIMITED; 5595 else 5596 color_space = COLOR_SPACE_YCBCR601; 5597 } 5598 5599 } 5600 break; 5601 case PIXEL_ENCODING_RGB: 5602 color_space = COLOR_SPACE_SRGB; 5603 break; 5604 5605 default: 5606 WARN_ON(1); 5607 break; 5608 } 5609 5610 return color_space; 5611 } 5612 5613 static bool adjust_colour_depth_from_display_info( 5614 struct dc_crtc_timing *timing_out, 5615 const struct drm_display_info *info) 5616 { 5617 enum dc_color_depth depth = timing_out->display_color_depth; 5618 int normalized_clk; 5619 do { 5620 normalized_clk = timing_out->pix_clk_100hz / 10; 5621 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 5622 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 5623 normalized_clk /= 2; 5624 /* Adjusting pix clock following on HDMI spec based on colour depth */ 5625 switch (depth) { 5626 case COLOR_DEPTH_888: 5627 break; 5628 case COLOR_DEPTH_101010: 5629 normalized_clk = (normalized_clk * 30) / 24; 5630 break; 5631 case COLOR_DEPTH_121212: 5632 normalized_clk = (normalized_clk * 36) / 24; 5633 break; 5634 case COLOR_DEPTH_161616: 5635 normalized_clk = (normalized_clk * 48) / 24; 5636 break; 5637 default: 5638 /* The above depths are the only ones valid for HDMI. 
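 * (8, 10, 12 and 16 bpc, i.e. 24/30/36/48 bits per pixel deep colour.)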
*/ 5639 return false; 5640 } 5641 if (normalized_clk <= info->max_tmds_clock) { 5642 timing_out->display_color_depth = depth; 5643 return true; 5644 } 5645 } while (--depth > COLOR_DEPTH_666); 5646 return false; 5647 } 5648 5649 static void fill_stream_properties_from_drm_display_mode( 5650 struct dc_stream_state *stream, 5651 const struct drm_display_mode *mode_in, 5652 const struct drm_connector *connector, 5653 const struct drm_connector_state *connector_state, 5654 const struct dc_stream_state *old_stream, 5655 int requested_bpc) 5656 { 5657 struct dc_crtc_timing *timing_out = &stream->timing; 5658 const struct drm_display_info *info = &connector->display_info; 5659 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 5660 struct hdmi_vendor_infoframe hv_frame; 5661 struct hdmi_avi_infoframe avi_frame; 5662 5663 memset(&hv_frame, 0, sizeof(hv_frame)); 5664 memset(&avi_frame, 0, sizeof(avi_frame)); 5665 5666 timing_out->h_border_left = 0; 5667 timing_out->h_border_right = 0; 5668 timing_out->v_border_top = 0; 5669 timing_out->v_border_bottom = 0; 5670 /* TODO: un-hardcode */ 5671 if (drm_mode_is_420_only(info, mode_in) 5672 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5673 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5674 else if (drm_mode_is_420_also(info, mode_in) 5675 && aconnector->force_yuv420_output) 5676 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5677 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) 5678 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 5679 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; 5680 else 5681 timing_out->pixel_encoding = PIXEL_ENCODING_RGB; 5682 5683 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; 5684 timing_out->display_color_depth = convert_color_depth_from_display_info( 5685 connector, 5686 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), 5687 requested_bpc); 5688 timing_out->scan_type = SCANNING_TYPE_NODATA; 5689 timing_out->hdmi_vic = 0; 5690 5691 if(old_stream) { 5692 timing_out->vic = old_stream->timing.vic; 5693 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; 5694 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; 5695 } else { 5696 timing_out->vic = drm_match_cea_mode(mode_in); 5697 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) 5698 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; 5699 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) 5700 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; 5701 } 5702 5703 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5704 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); 5705 timing_out->vic = avi_frame.video_code; 5706 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); 5707 timing_out->hdmi_vic = hv_frame.vic; 5708 } 5709 5710 if (is_freesync_video_mode(mode_in, aconnector)) { 5711 timing_out->h_addressable = mode_in->hdisplay; 5712 timing_out->h_total = mode_in->htotal; 5713 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; 5714 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; 5715 timing_out->v_total = mode_in->vtotal; 5716 timing_out->v_addressable = mode_in->vdisplay; 5717 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; 5718 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; 5719 timing_out->pix_clk_100hz = mode_in->clock * 10; 5720 } else { 5721 
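/*
 * Not a FreeSync video mode: use the CRTC-adjusted (crtc_*) timings of the
 * mode rather than the raw user-visible fields used for the FreeSync case
 * above.
 */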
timing_out->h_addressable = mode_in->crtc_hdisplay; 5722 timing_out->h_total = mode_in->crtc_htotal; 5723 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; 5724 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; 5725 timing_out->v_total = mode_in->crtc_vtotal; 5726 timing_out->v_addressable = mode_in->crtc_vdisplay; 5727 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; 5728 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; 5729 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; 5730 } 5731 5732 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 5733 5734 stream->output_color_space = get_output_color_space(timing_out); 5735 5736 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 5737 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 5738 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 5739 if (!adjust_colour_depth_from_display_info(timing_out, info) && 5740 drm_mode_is_420_also(info, mode_in) && 5741 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 5742 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 5743 adjust_colour_depth_from_display_info(timing_out, info); 5744 } 5745 } 5746 } 5747 5748 static void fill_audio_info(struct audio_info *audio_info, 5749 const struct drm_connector *drm_connector, 5750 const struct dc_sink *dc_sink) 5751 { 5752 int i = 0; 5753 int cea_revision = 0; 5754 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; 5755 5756 audio_info->manufacture_id = edid_caps->manufacturer_id; 5757 audio_info->product_id = edid_caps->product_id; 5758 5759 cea_revision = drm_connector->display_info.cea_rev; 5760 5761 strscpy(audio_info->display_name, 5762 edid_caps->display_name, 5763 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); 5764 5765 if (cea_revision >= 3) { 5766 audio_info->mode_count = edid_caps->audio_mode_count; 5767 5768 for (i = 0; i < audio_info->mode_count; ++i) { 5769 audio_info->modes[i].format_code = 5770 (enum audio_format_code) 5771 (edid_caps->audio_modes[i].format_code); 5772 audio_info->modes[i].channel_count = 5773 edid_caps->audio_modes[i].channel_count; 5774 audio_info->modes[i].sample_rates.all = 5775 edid_caps->audio_modes[i].sample_rate; 5776 audio_info->modes[i].sample_size = 5777 edid_caps->audio_modes[i].sample_size; 5778 } 5779 } 5780 5781 audio_info->flags.all = edid_caps->speaker_flags; 5782 5783 /* TODO: We only check for the progressive mode, check for interlace mode too */ 5784 if (drm_connector->latency_present[0]) { 5785 audio_info->video_latency = drm_connector->video_latency[0]; 5786 audio_info->audio_latency = drm_connector->audio_latency[0]; 5787 } 5788 5789 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ 5790 5791 } 5792 5793 static void 5794 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, 5795 struct drm_display_mode *dst_mode) 5796 { 5797 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; 5798 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; 5799 dst_mode->crtc_clock = src_mode->crtc_clock; 5800 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; 5801 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; 5802 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; 5803 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; 5804 dst_mode->crtc_htotal = src_mode->crtc_htotal; 5805 dst_mode->crtc_hskew = src_mode->crtc_hskew; 5806 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; 5807 dst_mode->crtc_vblank_end = 
src_mode->crtc_vblank_end; 5808 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; 5809 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; 5810 dst_mode->crtc_vtotal = src_mode->crtc_vtotal; 5811 } 5812 5813 static void 5814 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, 5815 const struct drm_display_mode *native_mode, 5816 bool scale_enabled) 5817 { 5818 if (scale_enabled) { 5819 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 5820 } else if (native_mode->clock == drm_mode->clock && 5821 native_mode->htotal == drm_mode->htotal && 5822 native_mode->vtotal == drm_mode->vtotal) { 5823 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); 5824 } else { 5825 /* no scaling nor amdgpu inserted, no need to patch */ 5826 } 5827 } 5828 5829 static struct dc_sink * 5830 create_fake_sink(struct amdgpu_dm_connector *aconnector) 5831 { 5832 struct dc_sink_init_data sink_init_data = { 0 }; 5833 struct dc_sink *sink = NULL; 5834 sink_init_data.link = aconnector->dc_link; 5835 sink_init_data.sink_signal = aconnector->dc_link->connector_signal; 5836 5837 sink = dc_sink_create(&sink_init_data); 5838 if (!sink) { 5839 DRM_ERROR("Failed to create sink!\n"); 5840 return NULL; 5841 } 5842 sink->sink_signal = SIGNAL_TYPE_VIRTUAL; 5843 5844 return sink; 5845 } 5846 5847 static void set_multisync_trigger_params( 5848 struct dc_stream_state *stream) 5849 { 5850 struct dc_stream_state *master = NULL; 5851 5852 if (stream->triggered_crtc_reset.enabled) { 5853 master = stream->triggered_crtc_reset.event_source; 5854 stream->triggered_crtc_reset.event = 5855 master->timing.flags.VSYNC_POSITIVE_POLARITY ? 5856 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING; 5857 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL; 5858 } 5859 } 5860 5861 static void set_master_stream(struct dc_stream_state *stream_set[], 5862 int stream_count) 5863 { 5864 int j, highest_rfr = 0, master_stream = 0; 5865 5866 for (j = 0; j < stream_count; j++) { 5867 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { 5868 int refresh_rate = 0; 5869 5870 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/ 5871 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); 5872 if (refresh_rate > highest_rfr) { 5873 highest_rfr = refresh_rate; 5874 master_stream = j; 5875 } 5876 } 5877 } 5878 for (j = 0; j < stream_count; j++) { 5879 if (stream_set[j]) 5880 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream]; 5881 } 5882 } 5883 5884 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) 5885 { 5886 int i = 0; 5887 struct dc_stream_state *stream; 5888 5889 if (context->stream_count < 2) 5890 return; 5891 for (i = 0; i < context->stream_count ; i++) { 5892 if (!context->streams[i]) 5893 continue; 5894 /* 5895 * TODO: add a function to read AMD VSDB bits and set 5896 * crtc_sync_master.multi_sync_enabled flag 5897 * For now it's set to false 5898 */ 5899 } 5900 5901 set_master_stream(context->streams, context->stream_count); 5902 5903 for (i = 0; i < context->stream_count ; i++) { 5904 stream = context->streams[i]; 5905 5906 if (!stream) 5907 continue; 5908 5909 set_multisync_trigger_params(stream); 5910 } 5911 } 5912 5913 #if defined(CONFIG_DRM_AMD_DC_DCN) 5914 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 5915 struct dc_sink *sink, struct dc_stream_state *stream, 5916 struct dsc_dec_dpcd_caps *dsc_caps) 5917 { 5918 stream->timing.flags.DSC = 0; 5919 5920 if (aconnector->dc_link && sink->sink_signal 
== SIGNAL_TYPE_DISPLAY_PORT) { 5921 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 5922 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 5923 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 5924 dsc_caps); 5925 } 5926 } 5927 5928 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 5929 struct dc_sink *sink, struct dc_stream_state *stream, 5930 struct dsc_dec_dpcd_caps *dsc_caps) 5931 { 5932 struct drm_connector *drm_connector = &aconnector->base; 5933 uint32_t link_bandwidth_kbps; 5934 uint32_t max_dsc_target_bpp_limit_override = 0; 5935 5936 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 5937 dc_link_get_link_cap(aconnector->dc_link)); 5938 5939 if (stream->link && stream->link->local_sink) 5940 max_dsc_target_bpp_limit_override = 5941 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit; 5942 5943 /* Set DSC policy according to dsc_clock_en */ 5944 dc_dsc_policy_set_enable_dsc_when_not_needed( 5945 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); 5946 5947 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 5948 5949 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], 5950 dsc_caps, 5951 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, 5952 max_dsc_target_bpp_limit_override, 5953 link_bandwidth_kbps, 5954 &stream->timing, 5955 &stream->timing.dsc_cfg)) { 5956 stream->timing.flags.DSC = 1; 5957 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); 5958 } 5959 } 5960 5961 /* Overwrite the stream flag if DSC is enabled through debugfs */ 5962 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) 5963 stream->timing.flags.DSC = 1; 5964 5965 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) 5966 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; 5967 5968 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) 5969 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; 5970 5971 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 5972 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 5973 } 5974 #endif 5975 5976 /** 5977 * DOC: FreeSync Video 5978 * 5979 * When a userspace application wants to play a video, the content follows a 5980 * standard format definition that usually specifies the FPS for that format. 5981 * The below list illustrates some video formats and the expected FPS, 5982 * respectively: 5983 * 5984 * - TV/NTSC (23.976 FPS) 5985 * - Cinema (24 FPS) 5986 * - TV/PAL (25 FPS) 5987 * - TV/NTSC (29.97 FPS) 5988 * - TV/NTSC (30 FPS) 5989 * - Cinema HFR (48 FPS) 5990 * - TV/PAL (50 FPS) 5991 * - Commonly used (60 FPS) 5992 * - Multiples of 24 (48,72,96 FPS) 5993 * 5994 * The list of standard video formats is not huge and can be added to the 5995 * connector modeset list beforehand. With that, userspace can leverage 5996 * FreeSync to extend the front porch in order to attain the target refresh 5997 * rate. Such a switch will happen seamlessly, without screen blanking or 5998 * reprogramming of the output in any other way. If the userspace requests a 5999 * modesetting change compatible with FreeSync modes that only differ in the 6000 * refresh rate, DC will skip the full update and avoid blinking during the 6001 * transition.
For example, the video player can change the modesetting from 6002 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without 6003 * causing any display blink. This same concept can be applied to a mode 6004 * setting change. 6005 */ 6006 static struct drm_display_mode * 6007 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, 6008 bool use_probed_modes) 6009 { 6010 struct drm_display_mode *m, *m_pref = NULL; 6011 u16 current_refresh, highest_refresh; 6012 struct list_head *list_head = use_probed_modes ? 6013 &aconnector->base.probed_modes : 6014 &aconnector->base.modes; 6015 6016 if (aconnector->freesync_vid_base.clock != 0) 6017 return &aconnector->freesync_vid_base; 6018 6019 /* Find the preferred mode */ 6020 list_for_each_entry (m, list_head, head) { 6021 if (m->type & DRM_MODE_TYPE_PREFERRED) { 6022 m_pref = m; 6023 break; 6024 } 6025 } 6026 6027 if (!m_pref) { 6028 /* Probably an EDID with no preferred mode. Fallback to first entry */ 6029 m_pref = list_first_entry_or_null( 6030 &aconnector->base.modes, struct drm_display_mode, head); 6031 if (!m_pref) { 6032 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); 6033 return NULL; 6034 } 6035 } 6036 6037 highest_refresh = drm_mode_vrefresh(m_pref); 6038 6039 /* 6040 * Find the mode with highest refresh rate with same resolution. 6041 * For some monitors, preferred mode is not the mode with highest 6042 * supported refresh rate. 6043 */ 6044 list_for_each_entry (m, list_head, head) { 6045 current_refresh = drm_mode_vrefresh(m); 6046 6047 if (m->hdisplay == m_pref->hdisplay && 6048 m->vdisplay == m_pref->vdisplay && 6049 highest_refresh < current_refresh) { 6050 highest_refresh = current_refresh; 6051 m_pref = m; 6052 } 6053 } 6054 6055 aconnector->freesync_vid_base = *m_pref; 6056 return m_pref; 6057 } 6058 6059 static bool is_freesync_video_mode(const struct drm_display_mode *mode, 6060 struct amdgpu_dm_connector *aconnector) 6061 { 6062 struct drm_display_mode *high_mode; 6063 int timing_diff; 6064 6065 high_mode = get_highest_refresh_rate_mode(aconnector, false); 6066 if (!high_mode || !mode) 6067 return false; 6068 6069 timing_diff = high_mode->vtotal - mode->vtotal; 6070 6071 if (high_mode->clock == 0 || high_mode->clock != mode->clock || 6072 high_mode->hdisplay != mode->hdisplay || 6073 high_mode->vdisplay != mode->vdisplay || 6074 high_mode->hsync_start != mode->hsync_start || 6075 high_mode->hsync_end != mode->hsync_end || 6076 high_mode->htotal != mode->htotal || 6077 high_mode->hskew != mode->hskew || 6078 high_mode->vscan != mode->vscan || 6079 high_mode->vsync_start - mode->vsync_start != timing_diff || 6080 high_mode->vsync_end - mode->vsync_end != timing_diff) 6081 return false; 6082 else 6083 return true; 6084 } 6085 6086 static struct dc_stream_state * 6087 create_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6088 const struct drm_display_mode *drm_mode, 6089 const struct dm_connector_state *dm_state, 6090 const struct dc_stream_state *old_stream, 6091 int requested_bpc) 6092 { 6093 struct drm_display_mode *preferred_mode = NULL; 6094 struct drm_connector *drm_connector; 6095 const struct drm_connector_state *con_state = 6096 dm_state ? &dm_state->base : NULL; 6097 struct dc_stream_state *stream = NULL; 6098 struct drm_display_mode mode = *drm_mode; 6099 struct drm_display_mode saved_mode; 6100 struct drm_display_mode *freesync_mode = NULL; 6101 bool native_mode_found = false; 6102 bool recalculate_timing = false; 6103 bool scale = dm_state ? 
(dm_state->scaling != RMX_OFF) : false; 6104 int mode_refresh; 6105 int preferred_refresh = 0; 6106 #if defined(CONFIG_DRM_AMD_DC_DCN) 6107 struct dsc_dec_dpcd_caps dsc_caps; 6108 #endif 6109 struct dc_sink *sink = NULL; 6110 6111 memset(&saved_mode, 0, sizeof(saved_mode)); 6112 6113 if (aconnector == NULL) { 6114 DRM_ERROR("aconnector is NULL!\n"); 6115 return stream; 6116 } 6117 6118 drm_connector = &aconnector->base; 6119 6120 if (!aconnector->dc_sink) { 6121 sink = create_fake_sink(aconnector); 6122 if (!sink) 6123 return stream; 6124 } else { 6125 sink = aconnector->dc_sink; 6126 dc_sink_retain(sink); 6127 } 6128 6129 stream = dc_create_stream_for_sink(sink); 6130 6131 if (stream == NULL) { 6132 DRM_ERROR("Failed to create stream for sink!\n"); 6133 goto finish; 6134 } 6135 6136 stream->dm_stream_context = aconnector; 6137 6138 stream->timing.flags.LTE_340MCSC_SCRAMBLE = 6139 drm_connector->display_info.hdmi.scdc.scrambling.low_rates; 6140 6141 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { 6142 /* Search for preferred mode */ 6143 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { 6144 native_mode_found = true; 6145 break; 6146 } 6147 } 6148 if (!native_mode_found) 6149 preferred_mode = list_first_entry_or_null( 6150 &aconnector->base.modes, 6151 struct drm_display_mode, 6152 head); 6153 6154 mode_refresh = drm_mode_vrefresh(&mode); 6155 6156 if (preferred_mode == NULL) { 6157 /* 6158 * This may not be an error, the use case is when we have no 6159 * usermode calls to reset and set mode upon hotplug. In this 6160 * case, we call set mode ourselves to restore the previous mode 6161 * and the modelist may not be filled in in time. 6162 */ 6163 DRM_DEBUG_DRIVER("No preferred mode found\n"); 6164 } else { 6165 recalculate_timing = amdgpu_freesync_vid_mode && 6166 is_freesync_video_mode(&mode, aconnector); 6167 if (recalculate_timing) { 6168 freesync_mode = get_highest_refresh_rate_mode(aconnector, false); 6169 saved_mode = mode; 6170 mode = *freesync_mode; 6171 } else { 6172 decide_crtc_timing_for_drm_display_mode( 6173 &mode, preferred_mode, scale); 6174 6175 preferred_refresh = drm_mode_vrefresh(preferred_mode); 6176 } 6177 } 6178 6179 if (recalculate_timing) 6180 drm_mode_set_crtcinfo(&saved_mode, 0); 6181 else if (!dm_state) 6182 drm_mode_set_crtcinfo(&mode, 0); 6183 6184 /* 6185 * If scaling is enabled and refresh rate didn't change 6186 * we copy the vic and polarities of the old timings 6187 */ 6188 if (!scale || mode_refresh != preferred_refresh) 6189 fill_stream_properties_from_drm_display_mode( 6190 stream, &mode, &aconnector->base, con_state, NULL, 6191 requested_bpc); 6192 else 6193 fill_stream_properties_from_drm_display_mode( 6194 stream, &mode, &aconnector->base, con_state, old_stream, 6195 requested_bpc); 6196 6197 #if defined(CONFIG_DRM_AMD_DC_DCN) 6198 /* SST DSC determination policy */ 6199 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6200 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) 6201 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6202 #endif 6203 6204 update_stream_scaling_settings(&mode, dm_state, stream); 6205 6206 fill_audio_info( 6207 &stream->audio_info, 6208 drm_connector, 6209 sink); 6210 6211 update_stream_signal(stream, sink); 6212 6213 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6214 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); 6215 6216 if (stream->link->psr_settings.psr_feature_enabled) { 6217 // 6218 // should decide stream 
support vsc sdp colorimetry capability 6219 // before building vsc info packet 6220 // 6221 stream->use_vsc_sdp_for_colorimetry = false; 6222 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 6223 stream->use_vsc_sdp_for_colorimetry = 6224 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; 6225 } else { 6226 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) 6227 stream->use_vsc_sdp_for_colorimetry = true; 6228 } 6229 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); 6230 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; 6231 6232 } 6233 finish: 6234 dc_sink_release(sink); 6235 6236 return stream; 6237 } 6238 6239 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) 6240 { 6241 drm_crtc_cleanup(crtc); 6242 kfree(crtc); 6243 } 6244 6245 static void dm_crtc_destroy_state(struct drm_crtc *crtc, 6246 struct drm_crtc_state *state) 6247 { 6248 struct dm_crtc_state *cur = to_dm_crtc_state(state); 6249 6250 /* TODO Destroy dc_stream objects are stream object is flattened */ 6251 if (cur->stream) 6252 dc_stream_release(cur->stream); 6253 6254 6255 __drm_atomic_helper_crtc_destroy_state(state); 6256 6257 6258 kfree(state); 6259 } 6260 6261 static void dm_crtc_reset_state(struct drm_crtc *crtc) 6262 { 6263 struct dm_crtc_state *state; 6264 6265 if (crtc->state) 6266 dm_crtc_destroy_state(crtc, crtc->state); 6267 6268 state = kzalloc(sizeof(*state), GFP_KERNEL); 6269 if (WARN_ON(!state)) 6270 return; 6271 6272 __drm_atomic_helper_crtc_reset(crtc, &state->base); 6273 } 6274 6275 static struct drm_crtc_state * 6276 dm_crtc_duplicate_state(struct drm_crtc *crtc) 6277 { 6278 struct dm_crtc_state *state, *cur; 6279 6280 cur = to_dm_crtc_state(crtc->state); 6281 6282 if (WARN_ON(!crtc->state)) 6283 return NULL; 6284 6285 state = kzalloc(sizeof(*state), GFP_KERNEL); 6286 if (!state) 6287 return NULL; 6288 6289 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); 6290 6291 if (cur->stream) { 6292 state->stream = cur->stream; 6293 dc_stream_retain(state->stream); 6294 } 6295 6296 state->active_planes = cur->active_planes; 6297 state->vrr_infopacket = cur->vrr_infopacket; 6298 state->abm_level = cur->abm_level; 6299 state->vrr_supported = cur->vrr_supported; 6300 state->freesync_config = cur->freesync_config; 6301 state->cm_has_degamma = cur->cm_has_degamma; 6302 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; 6303 state->force_dpms_off = cur->force_dpms_off; 6304 /* TODO Duplicate dc_stream after objects are stream object is flattened */ 6305 6306 return &state->base; 6307 } 6308 6309 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 6310 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) 6311 { 6312 crtc_debugfs_init(crtc); 6313 6314 return 0; 6315 } 6316 #endif 6317 6318 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) 6319 { 6320 enum dc_irq_source irq_source; 6321 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6322 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6323 int rc; 6324 6325 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; 6326 6327 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; 6328 6329 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", 6330 acrtc->crtc_id, enable ? 
"en" : "dis", rc); 6331 return rc; 6332 } 6333 6334 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) 6335 { 6336 enum dc_irq_source irq_source; 6337 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 6338 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 6339 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); 6340 #if defined(CONFIG_DRM_AMD_DC_DCN) 6341 struct amdgpu_display_manager *dm = &adev->dm; 6342 struct vblank_control_work *work; 6343 #endif 6344 int rc = 0; 6345 6346 if (enable) { 6347 /* vblank irq on -> Only need vupdate irq in vrr mode */ 6348 if (amdgpu_dm_vrr_active(acrtc_state)) 6349 rc = dm_set_vupdate_irq(crtc, true); 6350 } else { 6351 /* vblank irq off -> vupdate irq off */ 6352 rc = dm_set_vupdate_irq(crtc, false); 6353 } 6354 6355 if (rc) 6356 return rc; 6357 6358 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 6359 6360 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) 6361 return -EBUSY; 6362 6363 if (amdgpu_in_reset(adev)) 6364 return 0; 6365 6366 #if defined(CONFIG_DRM_AMD_DC_DCN) 6367 if (dm->vblank_control_workqueue) { 6368 work = kzalloc(sizeof(*work), GFP_ATOMIC); 6369 if (!work) 6370 return -ENOMEM; 6371 6372 INIT_WORK(&work->work, vblank_control_worker); 6373 work->dm = dm; 6374 work->acrtc = acrtc; 6375 work->enable = enable; 6376 6377 if (acrtc_state->stream) { 6378 dc_stream_retain(acrtc_state->stream); 6379 work->stream = acrtc_state->stream; 6380 } 6381 6382 queue_work(dm->vblank_control_workqueue, &work->work); 6383 } 6384 #endif 6385 6386 return 0; 6387 } 6388 6389 static int dm_enable_vblank(struct drm_crtc *crtc) 6390 { 6391 return dm_set_vblank(crtc, true); 6392 } 6393 6394 static void dm_disable_vblank(struct drm_crtc *crtc) 6395 { 6396 dm_set_vblank(crtc, false); 6397 } 6398 6399 /* Implemented only the options currently availible for the driver */ 6400 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { 6401 .reset = dm_crtc_reset_state, 6402 .destroy = amdgpu_dm_crtc_destroy, 6403 .set_config = drm_atomic_helper_set_config, 6404 .page_flip = drm_atomic_helper_page_flip, 6405 .atomic_duplicate_state = dm_crtc_duplicate_state, 6406 .atomic_destroy_state = dm_crtc_destroy_state, 6407 .set_crc_source = amdgpu_dm_crtc_set_crc_source, 6408 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, 6409 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, 6410 .get_vblank_counter = amdgpu_get_vblank_counter_kms, 6411 .enable_vblank = dm_enable_vblank, 6412 .disable_vblank = dm_disable_vblank, 6413 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 6414 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 6415 .late_register = amdgpu_dm_crtc_late_register, 6416 #endif 6417 }; 6418 6419 static enum drm_connector_status 6420 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) 6421 { 6422 bool connected; 6423 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6424 6425 /* 6426 * Notes: 6427 * 1. This interface is NOT called in context of HPD irq. 6428 * 2. This interface *is called* in context of user-mode ioctl. Which 6429 * makes it a bad place for *any* MST-related activity. 6430 */ 6431 6432 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && 6433 !aconnector->fake_enable) 6434 connected = (aconnector->dc_sink != NULL); 6435 else 6436 connected = (aconnector->base.force == DRM_FORCE_ON); 6437 6438 update_subconnector_property(aconnector); 6439 6440 return (connected ? 
connector_status_connected : 6441 connector_status_disconnected); 6442 } 6443 6444 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, 6445 struct drm_connector_state *connector_state, 6446 struct drm_property *property, 6447 uint64_t val) 6448 { 6449 struct drm_device *dev = connector->dev; 6450 struct amdgpu_device *adev = drm_to_adev(dev); 6451 struct dm_connector_state *dm_old_state = 6452 to_dm_connector_state(connector->state); 6453 struct dm_connector_state *dm_new_state = 6454 to_dm_connector_state(connector_state); 6455 6456 int ret = -EINVAL; 6457 6458 if (property == dev->mode_config.scaling_mode_property) { 6459 enum amdgpu_rmx_type rmx_type; 6460 6461 switch (val) { 6462 case DRM_MODE_SCALE_CENTER: 6463 rmx_type = RMX_CENTER; 6464 break; 6465 case DRM_MODE_SCALE_ASPECT: 6466 rmx_type = RMX_ASPECT; 6467 break; 6468 case DRM_MODE_SCALE_FULLSCREEN: 6469 rmx_type = RMX_FULL; 6470 break; 6471 case DRM_MODE_SCALE_NONE: 6472 default: 6473 rmx_type = RMX_OFF; 6474 break; 6475 } 6476 6477 if (dm_old_state->scaling == rmx_type) 6478 return 0; 6479 6480 dm_new_state->scaling = rmx_type; 6481 ret = 0; 6482 } else if (property == adev->mode_info.underscan_hborder_property) { 6483 dm_new_state->underscan_hborder = val; 6484 ret = 0; 6485 } else if (property == adev->mode_info.underscan_vborder_property) { 6486 dm_new_state->underscan_vborder = val; 6487 ret = 0; 6488 } else if (property == adev->mode_info.underscan_property) { 6489 dm_new_state->underscan_enable = val; 6490 ret = 0; 6491 } else if (property == adev->mode_info.abm_level_property) { 6492 dm_new_state->abm_level = val; 6493 ret = 0; 6494 } 6495 6496 return ret; 6497 } 6498 6499 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, 6500 const struct drm_connector_state *state, 6501 struct drm_property *property, 6502 uint64_t *val) 6503 { 6504 struct drm_device *dev = connector->dev; 6505 struct amdgpu_device *adev = drm_to_adev(dev); 6506 struct dm_connector_state *dm_state = 6507 to_dm_connector_state(state); 6508 int ret = -EINVAL; 6509 6510 if (property == dev->mode_config.scaling_mode_property) { 6511 switch (dm_state->scaling) { 6512 case RMX_CENTER: 6513 *val = DRM_MODE_SCALE_CENTER; 6514 break; 6515 case RMX_ASPECT: 6516 *val = DRM_MODE_SCALE_ASPECT; 6517 break; 6518 case RMX_FULL: 6519 *val = DRM_MODE_SCALE_FULLSCREEN; 6520 break; 6521 case RMX_OFF: 6522 default: 6523 *val = DRM_MODE_SCALE_NONE; 6524 break; 6525 } 6526 ret = 0; 6527 } else if (property == adev->mode_info.underscan_hborder_property) { 6528 *val = dm_state->underscan_hborder; 6529 ret = 0; 6530 } else if (property == adev->mode_info.underscan_vborder_property) { 6531 *val = dm_state->underscan_vborder; 6532 ret = 0; 6533 } else if (property == adev->mode_info.underscan_property) { 6534 *val = dm_state->underscan_enable; 6535 ret = 0; 6536 } else if (property == adev->mode_info.abm_level_property) { 6537 *val = dm_state->abm_level; 6538 ret = 0; 6539 } 6540 6541 return ret; 6542 } 6543 6544 static void amdgpu_dm_connector_unregister(struct drm_connector *connector) 6545 { 6546 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 6547 6548 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); 6549 } 6550 6551 static void amdgpu_dm_connector_destroy(struct drm_connector *connector) 6552 { 6553 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6554 const struct dc_link *link = aconnector->dc_link; 6555 struct amdgpu_device *adev = 
drm_to_adev(connector->dev); 6556 struct amdgpu_display_manager *dm = &adev->dm; 6557 int i; 6558 6559 /* 6560 * Call only if mst_mgr was initialized before since it's not done 6561 * for all connector types. 6562 */ 6563 if (aconnector->mst_mgr.dev) 6564 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); 6565 6566 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ 6567 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 6568 for (i = 0; i < dm->num_of_edps; i++) { 6569 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) { 6570 backlight_device_unregister(dm->backlight_dev[i]); 6571 dm->backlight_dev[i] = NULL; 6572 } 6573 } 6574 #endif 6575 6576 if (aconnector->dc_em_sink) 6577 dc_sink_release(aconnector->dc_em_sink); 6578 aconnector->dc_em_sink = NULL; 6579 if (aconnector->dc_sink) 6580 dc_sink_release(aconnector->dc_sink); 6581 aconnector->dc_sink = NULL; 6582 6583 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 6584 drm_connector_unregister(connector); 6585 drm_connector_cleanup(connector); 6586 if (aconnector->i2c) { 6587 i2c_del_adapter(&aconnector->i2c->base); 6588 kfree(aconnector->i2c); 6589 } 6590 kfree(aconnector->dm_dp_aux.aux.name); 6591 6592 kfree(connector); 6593 } 6594 6595 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) 6596 { 6597 struct dm_connector_state *state = 6598 to_dm_connector_state(connector->state); 6599 6600 if (connector->state) 6601 __drm_atomic_helper_connector_destroy_state(connector->state); 6602 6603 kfree(state); 6604 6605 state = kzalloc(sizeof(*state), GFP_KERNEL); 6606 6607 if (state) { 6608 state->scaling = RMX_OFF; 6609 state->underscan_enable = false; 6610 state->underscan_hborder = 0; 6611 state->underscan_vborder = 0; 6612 state->base.max_requested_bpc = 8; 6613 state->vcpi_slots = 0; 6614 state->pbn = 0; 6615 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 6616 state->abm_level = amdgpu_dm_abm_level; 6617 6618 __drm_atomic_helper_connector_reset(connector, &state->base); 6619 } 6620 } 6621 6622 struct drm_connector_state * 6623 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) 6624 { 6625 struct dm_connector_state *state = 6626 to_dm_connector_state(connector->state); 6627 6628 struct dm_connector_state *new_state = 6629 kmemdup(state, sizeof(*state), GFP_KERNEL); 6630 6631 if (!new_state) 6632 return NULL; 6633 6634 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); 6635 6636 new_state->freesync_capable = state->freesync_capable; 6637 new_state->abm_level = state->abm_level; 6638 new_state->scaling = state->scaling; 6639 new_state->underscan_enable = state->underscan_enable; 6640 new_state->underscan_hborder = state->underscan_hborder; 6641 new_state->underscan_vborder = state->underscan_vborder; 6642 new_state->vcpi_slots = state->vcpi_slots; 6643 new_state->pbn = state->pbn; 6644 return &new_state->base; 6645 } 6646 6647 static int 6648 amdgpu_dm_connector_late_register(struct drm_connector *connector) 6649 { 6650 struct amdgpu_dm_connector *amdgpu_dm_connector = 6651 to_amdgpu_dm_connector(connector); 6652 int r; 6653 6654 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 6655 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { 6656 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; 6657 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); 6658 if (r) 6659 return r; 6660 } 6661 6662 #if defined(CONFIG_DEBUG_FS) 6663 connector_debugfs_init(amdgpu_dm_connector); 6664 #endif 6665 6666 return 0; 6667 } 6668
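/*
 * Connector function table: hooks the DM connector into the DRM atomic
 * machinery (state reset/duplicate/destroy, the scaling, underscan and abm
 * properties handled above, plus detection and AUX registration).
 * For instance, a client that sets the connector's "scaling mode" property
 * to "Center" (e.g. via xrandr's --set) reaches
 * amdgpu_dm_connector_atomic_set_property() above as DRM_MODE_SCALE_CENTER
 * and is stored in the DM connector state as RMX_CENTER.
 */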
6669 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { 6670 .reset = amdgpu_dm_connector_funcs_reset, 6671 .detect = amdgpu_dm_connector_detect, 6672 .fill_modes = drm_helper_probe_single_connector_modes, 6673 .destroy = amdgpu_dm_connector_destroy, 6674 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, 6675 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 6676 .atomic_set_property = amdgpu_dm_connector_atomic_set_property, 6677 .atomic_get_property = amdgpu_dm_connector_atomic_get_property, 6678 .late_register = amdgpu_dm_connector_late_register, 6679 .early_unregister = amdgpu_dm_connector_unregister 6680 }; 6681 6682 static int get_modes(struct drm_connector *connector) 6683 { 6684 return amdgpu_dm_connector_get_modes(connector); 6685 } 6686 6687 static void create_eml_sink(struct amdgpu_dm_connector *aconnector) 6688 { 6689 struct dc_sink_init_data init_params = { 6690 .link = aconnector->dc_link, 6691 .sink_signal = SIGNAL_TYPE_VIRTUAL 6692 }; 6693 struct edid *edid; 6694 6695 if (!aconnector->base.edid_blob_ptr) { 6696 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", 6697 aconnector->base.name); 6698 6699 aconnector->base.force = DRM_FORCE_OFF; 6700 aconnector->base.override_edid = false; 6701 return; 6702 } 6703 6704 edid = (struct edid *) aconnector->base.edid_blob_ptr->data; 6705 6706 aconnector->edid = edid; 6707 6708 aconnector->dc_em_sink = dc_link_add_remote_sink( 6709 aconnector->dc_link, 6710 (uint8_t *)edid, 6711 (edid->extensions + 1) * EDID_LENGTH, 6712 &init_params); 6713 6714 if (aconnector->base.force == DRM_FORCE_ON) { 6715 aconnector->dc_sink = aconnector->dc_link->local_sink ? 6716 aconnector->dc_link->local_sink : 6717 aconnector->dc_em_sink; 6718 dc_sink_retain(aconnector->dc_sink); 6719 } 6720 } 6721 6722 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 6723 { 6724 struct dc_link *link = (struct dc_link *)aconnector->dc_link; 6725 6726 /* 6727 * In case of headless boot with force on for DP managed connector 6728 * Those settings have to be != 0 to get initial modeset 6729 */ 6730 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { 6731 link->verified_link_cap.lane_count = LANE_COUNT_FOUR; 6732 link->verified_link_cap.link_rate = LINK_RATE_HIGH2; 6733 } 6734 6735 6736 aconnector->base.override_edid = true; 6737 create_eml_sink(aconnector); 6738 } 6739 6740 static struct dc_stream_state * 6741 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, 6742 const struct drm_display_mode *drm_mode, 6743 const struct dm_connector_state *dm_state, 6744 const struct dc_stream_state *old_stream) 6745 { 6746 struct drm_connector *connector = &aconnector->base; 6747 struct amdgpu_device *adev = drm_to_adev(connector->dev); 6748 struct dc_stream_state *stream; 6749 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; 6750 int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8; 6751 enum dc_status dc_result = DC_OK; 6752 6753 do { 6754 stream = create_stream_for_sink(aconnector, drm_mode, 6755 dm_state, old_stream, 6756 requested_bpc); 6757 if (stream == NULL) { 6758 DRM_ERROR("Failed to create stream for sink!\n"); 6759 break; 6760 } 6761 6762 dc_result = dc_validate_stream(adev->dm.dc, stream); 6763 6764 if (dc_result != DC_OK) { 6765 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n", 6766 drm_mode->hdisplay, 6767 drm_mode->vdisplay, 6768 drm_mode->clock, 6769 dc_result, 6770 dc_status_to_str(dc_result)); 6771 6772 dc_stream_release(stream); 6773 stream = NULL; 6774 requested_bpc -= 2; /* lower bpc to retry validation */ 6775 } 6776 6777 } while (stream == NULL && requested_bpc >= 6); 6778 6779 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) { 6780 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n"); 6781 6782 aconnector->force_yuv420_output = true; 6783 stream = create_validate_stream_for_sink(aconnector, drm_mode, 6784 dm_state, old_stream); 6785 aconnector->force_yuv420_output = false; 6786 } 6787 6788 return stream; 6789 } 6790 6791 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, 6792 struct drm_display_mode *mode) 6793 { 6794 int result = MODE_ERROR; 6795 struct dc_sink *dc_sink; 6796 /* TODO: Unhardcode stream count */ 6797 struct dc_stream_state *stream; 6798 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 6799 6800 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 6801 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) 6802 return result; 6803 6804 /* 6805 * Only run this the first time mode_valid is called to initialize 6806 * EDID mgmt 6807 */ 6808 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && 6809 !aconnector->dc_em_sink) 6810 handle_edid_mgmt(aconnector); 6811 6812 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; 6813 6814 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL && 6815 aconnector->base.force != DRM_FORCE_ON) { 6816 DRM_ERROR("dc_sink is NULL!\n"); 6817 goto fail; 6818 } 6819 6820 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL); 6821 if (stream) { 6822 dc_stream_release(stream); 6823 result = MODE_OK; 6824 } 6825 6826 fail: 6827 /* TODO: error handling */ 6828 return result; 6829 } 6830 6831 static int fill_hdr_info_packet(const struct drm_connector_state *state, 6832 struct dc_info_packet *out) 6833 { 6834 struct hdmi_drm_infoframe frame; 6835 unsigned char buf[30]; /* 26 + 4 */ 6836 ssize_t len; 6837 int ret, i; 6838 6839 memset(out, 0, sizeof(*out)); 6840 6841 if (!state->hdr_output_metadata) 6842 return 0; 6843 6844 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state); 6845 if (ret) 6846 return ret; 6847 6848 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf)); 6849 if (len < 0) 6850 return (int)len; 6851 6852 /* Static metadata is a fixed 26 bytes + 4 byte header. */ 6853 if (len != 30) 6854 return -EINVAL; 6855 6856 /* Prepare the infopacket for DC.
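 * The packed infoframe is 30 bytes: a 4-byte header plus the 26-byte HDR
 * static metadata payload (validated above). HDMI keeps the native
 * infoframe header; for DP/eDP the same payload is carried in an SDP,
 * hence the different header bytes below.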
*/ 6857 switch (state->connector->connector_type) { 6858 case DRM_MODE_CONNECTOR_HDMIA: 6859 out->hb0 = 0x87; /* type */ 6860 out->hb1 = 0x01; /* version */ 6861 out->hb2 = 0x1A; /* length */ 6862 out->sb[0] = buf[3]; /* checksum */ 6863 i = 1; 6864 break; 6865 6866 case DRM_MODE_CONNECTOR_DisplayPort: 6867 case DRM_MODE_CONNECTOR_eDP: 6868 out->hb0 = 0x00; /* sdp id, zero */ 6869 out->hb1 = 0x87; /* type */ 6870 out->hb2 = 0x1D; /* payload len - 1 */ 6871 out->hb3 = (0x13 << 2); /* sdp version */ 6872 out->sb[0] = 0x01; /* version */ 6873 out->sb[1] = 0x1A; /* length */ 6874 i = 2; 6875 break; 6876 6877 default: 6878 return -EINVAL; 6879 } 6880 6881 memcpy(&out->sb[i], &buf[4], 26); 6882 out->valid = true; 6883 6884 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, 6885 sizeof(out->sb), false); 6886 6887 return 0; 6888 } 6889 6890 static int 6891 amdgpu_dm_connector_atomic_check(struct drm_connector *conn, 6892 struct drm_atomic_state *state) 6893 { 6894 struct drm_connector_state *new_con_state = 6895 drm_atomic_get_new_connector_state(state, conn); 6896 struct drm_connector_state *old_con_state = 6897 drm_atomic_get_old_connector_state(state, conn); 6898 struct drm_crtc *crtc = new_con_state->crtc; 6899 struct drm_crtc_state *new_crtc_state; 6900 int ret; 6901 6902 trace_amdgpu_dm_connector_atomic_check(new_con_state); 6903 6904 if (!crtc) 6905 return 0; 6906 6907 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { 6908 struct dc_info_packet hdr_infopacket; 6909 6910 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); 6911 if (ret) 6912 return ret; 6913 6914 new_crtc_state = drm_atomic_get_crtc_state(state, crtc); 6915 if (IS_ERR(new_crtc_state)) 6916 return PTR_ERR(new_crtc_state); 6917 6918 /* 6919 * DC considers the stream backends changed if the 6920 * static metadata changes. Forcing the modeset also 6921 * gives a simple way for userspace to switch from 6922 * 8bpc to 10bpc when setting the metadata to enter 6923 * or exit HDR. 6924 * 6925 * Changing the static metadata after it's been 6926 * set is permissible, however. So only force a 6927 * modeset if we're entering or exiting HDR. 6928 */ 6929 new_crtc_state->mode_changed = 6930 !old_con_state->hdr_output_metadata || 6931 !new_con_state->hdr_output_metadata; 6932 } 6933 6934 return 0; 6935 } 6936 6937 static const struct drm_connector_helper_funcs 6938 amdgpu_dm_connector_helper_funcs = { 6939 /* 6940 * If hotplugging a second bigger display in FB console mode, bigger resolution 6941 * modes will be filtered by drm_mode_validate_size(), and those modes 6942 * are missing after the user starts lightdm. So we need to renew the modes 6943 * list in the get_modes callback, not just return the modes count. 6944 */ 6945 .get_modes = get_modes, 6946 .mode_valid = amdgpu_dm_connector_mode_valid, 6947 .atomic_check = amdgpu_dm_connector_atomic_check, 6948 }; 6949 6950 static void dm_crtc_helper_disable(struct drm_crtc *crtc) 6951 { 6952 } 6953 6954 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) 6955 { 6956 struct drm_atomic_state *state = new_crtc_state->state; 6957 struct drm_plane *plane; 6958 int num_active = 0; 6959 6960 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) { 6961 struct drm_plane_state *new_plane_state; 6962 6963 /* Cursor planes are "fake".
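 * They are handled outside the DC plane-state path (see
 * handle_cursor_update() / dm_plane_atomic_async_update() further down),
 * so they do not count towards the active plane total.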
*/ 6964 if (plane->type == DRM_PLANE_TYPE_CURSOR) 6965 continue; 6966 6967 new_plane_state = drm_atomic_get_new_plane_state(state, plane); 6968 6969 if (!new_plane_state) { 6970 /* 6971 * The plane is enabled on the CRTC and hasn't changed 6972 * state. This means that it previously passed 6973 * validation and is therefore enabled. 6974 */ 6975 num_active += 1; 6976 continue; 6977 } 6978 6979 /* We need a framebuffer to be considered enabled. */ 6980 num_active += (new_plane_state->fb != NULL); 6981 } 6982 6983 return num_active; 6984 } 6985 6986 static void dm_update_crtc_active_planes(struct drm_crtc *crtc, 6987 struct drm_crtc_state *new_crtc_state) 6988 { 6989 struct dm_crtc_state *dm_new_crtc_state = 6990 to_dm_crtc_state(new_crtc_state); 6991 6992 dm_new_crtc_state->active_planes = 0; 6993 6994 if (!dm_new_crtc_state->stream) 6995 return; 6996 6997 dm_new_crtc_state->active_planes = 6998 count_crtc_active_planes(new_crtc_state); 6999 } 7000 7001 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, 7002 struct drm_atomic_state *state) 7003 { 7004 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, 7005 crtc); 7006 struct amdgpu_device *adev = drm_to_adev(crtc->dev); 7007 struct dc *dc = adev->dm.dc; 7008 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); 7009 int ret = -EINVAL; 7010 7011 trace_amdgpu_dm_crtc_atomic_check(crtc_state); 7012 7013 dm_update_crtc_active_planes(crtc, crtc_state); 7014 7015 if (WARN_ON(unlikely(!dm_crtc_state->stream && 7016 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { 7017 return ret; 7018 } 7019 7020 /* 7021 * We require the primary plane to be enabled whenever the CRTC is, otherwise 7022 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other 7023 * planes are disabled, which is not supported by the hardware. And there is legacy 7024 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7025 */ 7026 if (crtc_state->enable && 7027 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { 7028 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); 7029 return -EINVAL; 7030 } 7031 7032 /* In some use cases, like reset, no stream is attached */ 7033 if (!dm_crtc_state->stream) 7034 return 0; 7035 7036 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) 7037 return 0; 7038 7039 DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); 7040 return ret; 7041 } 7042 7043 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, 7044 const struct drm_display_mode *mode, 7045 struct drm_display_mode *adjusted_mode) 7046 { 7047 return true; 7048 } 7049 7050 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { 7051 .disable = dm_crtc_helper_disable, 7052 .atomic_check = dm_crtc_helper_atomic_check, 7053 .mode_fixup = dm_crtc_helper_mode_fixup, 7054 .get_scanout_position = amdgpu_crtc_get_scanout_position, 7055 }; 7056 7057 static void dm_encoder_helper_disable(struct drm_encoder *encoder) 7058 { 7059 7060 } 7061 7062 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth) 7063 { 7064 switch (display_color_depth) { 7065 case COLOR_DEPTH_666: 7066 return 6; 7067 case COLOR_DEPTH_888: 7068 return 8; 7069 case COLOR_DEPTH_101010: 7070 return 10; 7071 case COLOR_DEPTH_121212: 7072 return 12; 7073 case COLOR_DEPTH_141414: 7074 return 14; 7075 case COLOR_DEPTH_161616: 7076 return 16; 7077 default: 7078 break; 7079 } 7080 return 0; 7081 } 7082 7083 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, 7084 struct drm_crtc_state *crtc_state, 7085 struct drm_connector_state *conn_state) 7086 { 7087 struct drm_atomic_state *state = crtc_state->state; 7088 struct drm_connector *connector = conn_state->connector; 7089 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 7090 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); 7091 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 7092 struct drm_dp_mst_topology_mgr *mst_mgr; 7093 struct drm_dp_mst_port *mst_port; 7094 enum dc_color_depth color_depth; 7095 int clock, bpp = 0; 7096 bool is_y420 = false; 7097 7098 if (!aconnector->port || !aconnector->dc_sink) 7099 return 0; 7100 7101 mst_port = aconnector->port; 7102 mst_mgr = &aconnector->mst_port->mst_mgr; 7103 7104 if (!crtc_state->connectors_changed && !crtc_state->mode_changed) 7105 return 0; 7106 7107 if (!state->duplicated) { 7108 int max_bpc = conn_state->max_requested_bpc; 7109 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && 7110 aconnector->force_yuv420_output; 7111 color_depth = convert_color_depth_from_display_info(connector, 7112 is_y420, 7113 max_bpc); 7114 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 7115 clock = adjusted_mode->clock; 7116 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); 7117 } 7118 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, 7119 mst_mgr, 7120 mst_port, 7121 dm_new_connector_state->pbn, 7122 dm_mst_get_pbn_divider(aconnector->dc_link)); 7123 if (dm_new_connector_state->vcpi_slots < 0) { 7124 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); 7125 return dm_new_connector_state->vcpi_slots; 7126 } 7127 return 0; 7128 } 7129 7130 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { 7131 .disable = dm_encoder_helper_disable, 7132 .atomic_check 
= dm_encoder_helper_atomic_check 7133 }; 7134 7135 #if defined(CONFIG_DRM_AMD_DC_DCN) 7136 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, 7137 struct dc_state *dc_state, 7138 struct dsc_mst_fairness_vars *vars) 7139 { 7140 struct dc_stream_state *stream = NULL; 7141 struct drm_connector *connector; 7142 struct drm_connector_state *new_con_state; 7143 struct amdgpu_dm_connector *aconnector; 7144 struct dm_connector_state *dm_conn_state; 7145 int i, j, clock; 7146 int vcpi, pbn_div, pbn = 0; 7147 7148 for_each_new_connector_in_state(state, connector, new_con_state, i) { 7149 7150 aconnector = to_amdgpu_dm_connector(connector); 7151 7152 if (!aconnector->port) 7153 continue; 7154 7155 if (!new_con_state || !new_con_state->crtc) 7156 continue; 7157 7158 dm_conn_state = to_dm_connector_state(new_con_state); 7159 7160 for (j = 0; j < dc_state->stream_count; j++) { 7161 stream = dc_state->streams[j]; 7162 if (!stream) 7163 continue; 7164 7165 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector) 7166 break; 7167 7168 stream = NULL; 7169 } 7170 7171 if (!stream) 7172 continue; 7173 7174 if (stream->timing.flags.DSC != 1) { 7175 drm_dp_mst_atomic_enable_dsc(state, 7176 aconnector->port, 7177 dm_conn_state->pbn, 7178 0, 7179 false); 7180 continue; 7181 } 7182 7183 pbn_div = dm_mst_get_pbn_divider(stream->link); 7184 clock = stream->timing.pix_clk_100hz / 10; 7185 /* pbn is calculated by compute_mst_dsc_configs_for_state*/ 7186 for (j = 0; j < dc_state->stream_count; j++) { 7187 if (vars[j].aconnector == aconnector) { 7188 pbn = vars[j].pbn; 7189 break; 7190 } 7191 } 7192 7193 vcpi = drm_dp_mst_atomic_enable_dsc(state, 7194 aconnector->port, 7195 pbn, pbn_div, 7196 true); 7197 if (vcpi < 0) 7198 return vcpi; 7199 7200 dm_conn_state->pbn = pbn; 7201 dm_conn_state->vcpi_slots = vcpi; 7202 } 7203 return 0; 7204 } 7205 #endif 7206 7207 static void dm_drm_plane_reset(struct drm_plane *plane) 7208 { 7209 struct dm_plane_state *amdgpu_state = NULL; 7210 7211 if (plane->state) 7212 plane->funcs->atomic_destroy_state(plane, plane->state); 7213 7214 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); 7215 WARN_ON(amdgpu_state == NULL); 7216 7217 if (amdgpu_state) 7218 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); 7219 } 7220 7221 static struct drm_plane_state * 7222 dm_drm_plane_duplicate_state(struct drm_plane *plane) 7223 { 7224 struct dm_plane_state *dm_plane_state, *old_dm_plane_state; 7225 7226 old_dm_plane_state = to_dm_plane_state(plane->state); 7227 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); 7228 if (!dm_plane_state) 7229 return NULL; 7230 7231 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); 7232 7233 if (old_dm_plane_state->dc_state) { 7234 dm_plane_state->dc_state = old_dm_plane_state->dc_state; 7235 dc_plane_state_retain(dm_plane_state->dc_state); 7236 } 7237 7238 return &dm_plane_state->base; 7239 } 7240 7241 static void dm_drm_plane_destroy_state(struct drm_plane *plane, 7242 struct drm_plane_state *state) 7243 { 7244 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); 7245 7246 if (dm_plane_state->dc_state) 7247 dc_plane_state_release(dm_plane_state->dc_state); 7248 7249 drm_atomic_helper_plane_destroy_state(plane, state); 7250 } 7251 7252 static const struct drm_plane_funcs dm_plane_funcs = { 7253 .update_plane = drm_atomic_helper_update_plane, 7254 .disable_plane = drm_atomic_helper_disable_plane, 7255 .destroy = drm_primary_helper_destroy, 7256 .reset = 
dm_drm_plane_reset, 7257 .atomic_duplicate_state = dm_drm_plane_duplicate_state, 7258 .atomic_destroy_state = dm_drm_plane_destroy_state, 7259 .format_mod_supported = dm_plane_format_mod_supported, 7260 }; 7261 7262 static int dm_plane_helper_prepare_fb(struct drm_plane *plane, 7263 struct drm_plane_state *new_state) 7264 { 7265 struct amdgpu_framebuffer *afb; 7266 struct drm_gem_object *obj; 7267 struct amdgpu_device *adev; 7268 struct amdgpu_bo *rbo; 7269 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; 7270 struct list_head list; 7271 struct ttm_validate_buffer tv; 7272 struct ww_acquire_ctx ticket; 7273 uint32_t domain; 7274 int r; 7275 7276 if (!new_state->fb) { 7277 DRM_DEBUG_KMS("No FB bound\n"); 7278 return 0; 7279 } 7280 7281 afb = to_amdgpu_framebuffer(new_state->fb); 7282 obj = new_state->fb->obj[0]; 7283 rbo = gem_to_amdgpu_bo(obj); 7284 adev = amdgpu_ttm_adev(rbo->tbo.bdev); 7285 INIT_LIST_HEAD(&list); 7286 7287 tv.bo = &rbo->tbo; 7288 tv.num_shared = 1; 7289 list_add(&tv.head, &list); 7290 7291 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); 7292 if (r) { 7293 dev_err(adev->dev, "fail to reserve bo (%d)\n", r); 7294 return r; 7295 } 7296 7297 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7298 domain = amdgpu_display_supported_domains(adev, rbo->flags); 7299 else 7300 domain = AMDGPU_GEM_DOMAIN_VRAM; 7301 7302 r = amdgpu_bo_pin(rbo, domain); 7303 if (unlikely(r != 0)) { 7304 if (r != -ERESTARTSYS) 7305 DRM_ERROR("Failed to pin framebuffer with error %d\n", r); 7306 ttm_eu_backoff_reservation(&ticket, &list); 7307 return r; 7308 } 7309 7310 r = amdgpu_ttm_alloc_gart(&rbo->tbo); 7311 if (unlikely(r != 0)) { 7312 amdgpu_bo_unpin(rbo); 7313 ttm_eu_backoff_reservation(&ticket, &list); 7314 DRM_ERROR("%p bind failed\n", rbo); 7315 return r; 7316 } 7317 7318 ttm_eu_backoff_reservation(&ticket, &list); 7319 7320 afb->address = amdgpu_bo_gpu_offset(rbo); 7321 7322 amdgpu_bo_ref(rbo); 7323 7324 /** 7325 * We don't do surface updates on planes that have been newly created, 7326 * but we also don't have the afb->address during atomic check. 7327 * 7328 * Fill in buffer attributes depending on the address here, but only on 7329 * newly created planes since they're not being used by DC yet and this 7330 * won't modify global state. 
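 *
 * "Newly created" is detected below by comparing the old and new
 * dm_plane_state dc_state pointers: only a plane that just got a fresh
 * dc_state takes this path.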
7331 */ 7332 dm_plane_state_old = to_dm_plane_state(plane->state); 7333 dm_plane_state_new = to_dm_plane_state(new_state); 7334 7335 if (dm_plane_state_new->dc_state && 7336 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 7337 struct dc_plane_state *plane_state = 7338 dm_plane_state_new->dc_state; 7339 bool force_disable_dcc = !plane_state->dcc.enable; 7340 7341 fill_plane_buffer_attributes( 7342 adev, afb, plane_state->format, plane_state->rotation, 7343 afb->tiling_flags, 7344 &plane_state->tiling_info, &plane_state->plane_size, 7345 &plane_state->dcc, &plane_state->address, 7346 afb->tmz_surface, force_disable_dcc); 7347 } 7348 7349 return 0; 7350 } 7351 7352 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, 7353 struct drm_plane_state *old_state) 7354 { 7355 struct amdgpu_bo *rbo; 7356 int r; 7357 7358 if (!old_state->fb) 7359 return; 7360 7361 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); 7362 r = amdgpu_bo_reserve(rbo, false); 7363 if (unlikely(r)) { 7364 DRM_ERROR("failed to reserve rbo before unpin\n"); 7365 return; 7366 } 7367 7368 amdgpu_bo_unpin(rbo); 7369 amdgpu_bo_unreserve(rbo); 7370 amdgpu_bo_unref(&rbo); 7371 } 7372 7373 static int dm_plane_helper_check_state(struct drm_plane_state *state, 7374 struct drm_crtc_state *new_crtc_state) 7375 { 7376 struct drm_framebuffer *fb = state->fb; 7377 int min_downscale, max_upscale; 7378 int min_scale = 0; 7379 int max_scale = INT_MAX; 7380 7381 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */ 7382 if (fb && state->crtc) { 7383 /* Validate viewport to cover the case when only the position changes */ 7384 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { 7385 int viewport_width = state->crtc_w; 7386 int viewport_height = state->crtc_h; 7387 7388 if (state->crtc_x < 0) 7389 viewport_width += state->crtc_x; 7390 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) 7391 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; 7392 7393 if (state->crtc_y < 0) 7394 viewport_height += state->crtc_y; 7395 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) 7396 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; 7397 7398 if (viewport_width < 0 || viewport_height < 0) { 7399 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); 7400 return -EINVAL; 7401 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ 7402 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); 7403 return -EINVAL; 7404 } else if (viewport_height < MIN_VIEWPORT_SIZE) { 7405 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); 7406 return -EINVAL; 7407 } 7408 7409 } 7410 7411 /* Get min/max allowed scaling factors from plane caps. */ 7412 get_min_max_dc_plane_scaling(state->crtc->dev, fb, 7413 &min_downscale, &max_upscale); 7414 /* 7415 * Convert to drm convention: 16.16 fixed point, instead of dc's 7416 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's 7417 * dst/src, so min_scale = 1.0 / max_upscale, etc. 
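 *
 * Worked example with illustrative (not ASIC-specific) caps: if
 * max_upscale = 16000 (16x in DC's 1.0 == 1000 convention), then
 * min_scale = (1000 << 16) / 16000 = 4096, i.e. 1/16 in 16.16 fixed
 * point; if min_downscale = 250 (1/4x), then
 * max_scale = (1000 << 16) / 250 = 262144, i.e. 4.0 in 16.16.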
7418 */ 7419 min_scale = (1000 << 16) / max_upscale; 7420 max_scale = (1000 << 16) / min_downscale; 7421 } 7422 7423 return drm_atomic_helper_check_plane_state( 7424 state, new_crtc_state, min_scale, max_scale, true, true); 7425 } 7426 7427 static int dm_plane_atomic_check(struct drm_plane *plane, 7428 struct drm_atomic_state *state) 7429 { 7430 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, 7431 plane); 7432 struct amdgpu_device *adev = drm_to_adev(plane->dev); 7433 struct dc *dc = adev->dm.dc; 7434 struct dm_plane_state *dm_plane_state; 7435 struct dc_scaling_info scaling_info; 7436 struct drm_crtc_state *new_crtc_state; 7437 int ret; 7438 7439 trace_amdgpu_dm_plane_atomic_check(new_plane_state); 7440 7441 dm_plane_state = to_dm_plane_state(new_plane_state); 7442 7443 if (!dm_plane_state->dc_state) 7444 return 0; 7445 7446 new_crtc_state = 7447 drm_atomic_get_new_crtc_state(state, 7448 new_plane_state->crtc); 7449 if (!new_crtc_state) 7450 return -EINVAL; 7451 7452 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 7453 if (ret) 7454 return ret; 7455 7456 ret = fill_dc_scaling_info(new_plane_state, &scaling_info); 7457 if (ret) 7458 return ret; 7459 7460 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) 7461 return 0; 7462 7463 return -EINVAL; 7464 } 7465 7466 static int dm_plane_atomic_async_check(struct drm_plane *plane, 7467 struct drm_atomic_state *state) 7468 { 7469 /* Only support async updates on cursor planes. */ 7470 if (plane->type != DRM_PLANE_TYPE_CURSOR) 7471 return -EINVAL; 7472 7473 return 0; 7474 } 7475 7476 static void dm_plane_atomic_async_update(struct drm_plane *plane, 7477 struct drm_atomic_state *state) 7478 { 7479 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, 7480 plane); 7481 struct drm_plane_state *old_state = 7482 drm_atomic_get_old_plane_state(state, plane); 7483 7484 trace_amdgpu_dm_atomic_update_cursor(new_state); 7485 7486 swap(plane->state->fb, new_state->fb); 7487 7488 plane->state->src_x = new_state->src_x; 7489 plane->state->src_y = new_state->src_y; 7490 plane->state->src_w = new_state->src_w; 7491 plane->state->src_h = new_state->src_h; 7492 plane->state->crtc_x = new_state->crtc_x; 7493 plane->state->crtc_y = new_state->crtc_y; 7494 plane->state->crtc_w = new_state->crtc_w; 7495 plane->state->crtc_h = new_state->crtc_h; 7496 7497 handle_cursor_update(plane, old_state); 7498 } 7499 7500 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { 7501 .prepare_fb = dm_plane_helper_prepare_fb, 7502 .cleanup_fb = dm_plane_helper_cleanup_fb, 7503 .atomic_check = dm_plane_atomic_check, 7504 .atomic_async_check = dm_plane_atomic_async_check, 7505 .atomic_async_update = dm_plane_atomic_async_update 7506 }; 7507 7508 /* 7509 * TODO: these are currently initialized to rgb formats only. 
7510 * For future use cases we should either initialize them dynamically based on 7511 * plane capabilities, or initialize this array to all formats, so internal drm 7512 * check will succeed, and let DC implement proper check 7513 */ 7514 static const uint32_t rgb_formats[] = { 7515 DRM_FORMAT_XRGB8888, 7516 DRM_FORMAT_ARGB8888, 7517 DRM_FORMAT_RGBA8888, 7518 DRM_FORMAT_XRGB2101010, 7519 DRM_FORMAT_XBGR2101010, 7520 DRM_FORMAT_ARGB2101010, 7521 DRM_FORMAT_ABGR2101010, 7522 DRM_FORMAT_XRGB16161616, 7523 DRM_FORMAT_XBGR16161616, 7524 DRM_FORMAT_ARGB16161616, 7525 DRM_FORMAT_ABGR16161616, 7526 DRM_FORMAT_XBGR8888, 7527 DRM_FORMAT_ABGR8888, 7528 DRM_FORMAT_RGB565, 7529 }; 7530 7531 static const uint32_t overlay_formats[] = { 7532 DRM_FORMAT_XRGB8888, 7533 DRM_FORMAT_ARGB8888, 7534 DRM_FORMAT_RGBA8888, 7535 DRM_FORMAT_XBGR8888, 7536 DRM_FORMAT_ABGR8888, 7537 DRM_FORMAT_RGB565 7538 }; 7539 7540 static const u32 cursor_formats[] = { 7541 DRM_FORMAT_ARGB8888 7542 }; 7543 7544 static int get_plane_formats(const struct drm_plane *plane, 7545 const struct dc_plane_cap *plane_cap, 7546 uint32_t *formats, int max_formats) 7547 { 7548 int i, num_formats = 0; 7549 7550 /* 7551 * TODO: Query support for each group of formats directly from 7552 * DC plane caps. This will require adding more formats to the 7553 * caps list. 7554 */ 7555 7556 switch (plane->type) { 7557 case DRM_PLANE_TYPE_PRIMARY: 7558 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { 7559 if (num_formats >= max_formats) 7560 break; 7561 7562 formats[num_formats++] = rgb_formats[i]; 7563 } 7564 7565 if (plane_cap && plane_cap->pixel_format_support.nv12) 7566 formats[num_formats++] = DRM_FORMAT_NV12; 7567 if (plane_cap && plane_cap->pixel_format_support.p010) 7568 formats[num_formats++] = DRM_FORMAT_P010; 7569 if (plane_cap && plane_cap->pixel_format_support.fp16) { 7570 formats[num_formats++] = DRM_FORMAT_XRGB16161616F; 7571 formats[num_formats++] = DRM_FORMAT_ARGB16161616F; 7572 formats[num_formats++] = DRM_FORMAT_XBGR16161616F; 7573 formats[num_formats++] = DRM_FORMAT_ABGR16161616F; 7574 } 7575 break; 7576 7577 case DRM_PLANE_TYPE_OVERLAY: 7578 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { 7579 if (num_formats >= max_formats) 7580 break; 7581 7582 formats[num_formats++] = overlay_formats[i]; 7583 } 7584 break; 7585 7586 case DRM_PLANE_TYPE_CURSOR: 7587 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { 7588 if (num_formats >= max_formats) 7589 break; 7590 7591 formats[num_formats++] = cursor_formats[i]; 7592 } 7593 break; 7594 } 7595 7596 return num_formats; 7597 } 7598 7599 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, 7600 struct drm_plane *plane, 7601 unsigned long possible_crtcs, 7602 const struct dc_plane_cap *plane_cap) 7603 { 7604 uint32_t formats[32]; 7605 int num_formats; 7606 int res = -EPERM; 7607 unsigned int supported_rotations; 7608 uint64_t *modifiers = NULL; 7609 7610 num_formats = get_plane_formats(plane, plane_cap, formats, 7611 ARRAY_SIZE(formats)); 7612 7613 res = get_plane_modifiers(dm->adev, plane->type, &modifiers); 7614 if (res) 7615 return res; 7616 7617 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, 7618 &dm_plane_funcs, formats, num_formats, 7619 modifiers, plane->type, NULL); 7620 kfree(modifiers); 7621 if (res) 7622 return res; 7623 7624 if (plane->type == DRM_PLANE_TYPE_OVERLAY && 7625 plane_cap && plane_cap->per_pixel_alpha) { 7626 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | 7627 BIT(DRM_MODE_BLEND_PREMULTI); 7628 7629 
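/*
 * Overlay planes whose DC caps report per-pixel alpha get the generic
 * DRM plane alpha property plus a blend mode property limited to
 * "None" and "Pre-multiplied"; coverage blending is not exposed here.
 */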
drm_plane_create_alpha_property(plane); 7630 drm_plane_create_blend_mode_property(plane, blend_caps); 7631 } 7632 7633 if (plane->type == DRM_PLANE_TYPE_PRIMARY && 7634 plane_cap && 7635 (plane_cap->pixel_format_support.nv12 || 7636 plane_cap->pixel_format_support.p010)) { 7637 /* This only affects YUV formats. */ 7638 drm_plane_create_color_properties( 7639 plane, 7640 BIT(DRM_COLOR_YCBCR_BT601) | 7641 BIT(DRM_COLOR_YCBCR_BT709) | 7642 BIT(DRM_COLOR_YCBCR_BT2020), 7643 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | 7644 BIT(DRM_COLOR_YCBCR_FULL_RANGE), 7645 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); 7646 } 7647 7648 supported_rotations = 7649 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 7650 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 7651 7652 if (dm->adev->asic_type >= CHIP_BONAIRE && 7653 plane->type != DRM_PLANE_TYPE_CURSOR) 7654 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, 7655 supported_rotations); 7656 7657 drm_plane_helper_add(plane, &dm_plane_helper_funcs); 7658 7659 /* Create (reset) the plane state */ 7660 if (plane->funcs->reset) 7661 plane->funcs->reset(plane); 7662 7663 return 0; 7664 } 7665 7666 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 7667 struct drm_plane *plane, 7668 uint32_t crtc_index) 7669 { 7670 struct amdgpu_crtc *acrtc = NULL; 7671 struct drm_plane *cursor_plane; 7672 7673 int res = -ENOMEM; 7674 7675 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); 7676 if (!cursor_plane) 7677 goto fail; 7678 7679 cursor_plane->type = DRM_PLANE_TYPE_CURSOR; 7680 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); 7681 7682 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); 7683 if (!acrtc) 7684 goto fail; 7685 7686 res = drm_crtc_init_with_planes( 7687 dm->ddev, 7688 &acrtc->base, 7689 plane, 7690 cursor_plane, 7691 &amdgpu_dm_crtc_funcs, NULL); 7692 7693 if (res) 7694 goto fail; 7695 7696 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); 7697 7698 /* Create (reset) the plane state */ 7699 if (acrtc->base.funcs->reset) 7700 acrtc->base.funcs->reset(&acrtc->base); 7701 7702 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; 7703 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; 7704 7705 acrtc->crtc_id = crtc_index; 7706 acrtc->base.enabled = false; 7707 acrtc->otg_inst = -1; 7708 7709 dm->adev->mode_info.crtcs[crtc_index] = acrtc; 7710 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, 7711 true, MAX_COLOR_LUT_ENTRIES); 7712 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); 7713 7714 return 0; 7715 7716 fail: 7717 kfree(acrtc); 7718 kfree(cursor_plane); 7719 return res; 7720 } 7721 7722 7723 static int to_drm_connector_type(enum signal_type st) 7724 { 7725 switch (st) { 7726 case SIGNAL_TYPE_HDMI_TYPE_A: 7727 return DRM_MODE_CONNECTOR_HDMIA; 7728 case SIGNAL_TYPE_EDP: 7729 return DRM_MODE_CONNECTOR_eDP; 7730 case SIGNAL_TYPE_LVDS: 7731 return DRM_MODE_CONNECTOR_LVDS; 7732 case SIGNAL_TYPE_RGB: 7733 return DRM_MODE_CONNECTOR_VGA; 7734 case SIGNAL_TYPE_DISPLAY_PORT: 7735 case SIGNAL_TYPE_DISPLAY_PORT_MST: 7736 return DRM_MODE_CONNECTOR_DisplayPort; 7737 case SIGNAL_TYPE_DVI_DUAL_LINK: 7738 case SIGNAL_TYPE_DVI_SINGLE_LINK: 7739 return DRM_MODE_CONNECTOR_DVID; 7740 case SIGNAL_TYPE_VIRTUAL: 7741 return DRM_MODE_CONNECTOR_VIRTUAL; 7742 7743 default: 7744 return DRM_MODE_CONNECTOR_Unknown; 7745 } 7746 } 7747 7748 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) 7749 { 7750 struct drm_encoder *encoder; 7751 
7752 /* There is only one encoder per connector */ 7753 drm_connector_for_each_possible_encoder(connector, encoder) 7754 return encoder; 7755 7756 return NULL; 7757 } 7758 7759 static void amdgpu_dm_get_native_mode(struct drm_connector *connector) 7760 { 7761 struct drm_encoder *encoder; 7762 struct amdgpu_encoder *amdgpu_encoder; 7763 7764 encoder = amdgpu_dm_connector_to_encoder(connector); 7765 7766 if (encoder == NULL) 7767 return; 7768 7769 amdgpu_encoder = to_amdgpu_encoder(encoder); 7770 7771 amdgpu_encoder->native_mode.clock = 0; 7772 7773 if (!list_empty(&connector->probed_modes)) { 7774 struct drm_display_mode *preferred_mode = NULL; 7775 7776 list_for_each_entry(preferred_mode, 7777 &connector->probed_modes, 7778 head) { 7779 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) 7780 amdgpu_encoder->native_mode = *preferred_mode; 7781 7782 break; 7783 } 7784 7785 } 7786 } 7787 7788 static struct drm_display_mode * 7789 amdgpu_dm_create_common_mode(struct drm_encoder *encoder, 7790 char *name, 7791 int hdisplay, int vdisplay) 7792 { 7793 struct drm_device *dev = encoder->dev; 7794 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7795 struct drm_display_mode *mode = NULL; 7796 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7797 7798 mode = drm_mode_duplicate(dev, native_mode); 7799 7800 if (mode == NULL) 7801 return NULL; 7802 7803 mode->hdisplay = hdisplay; 7804 mode->vdisplay = vdisplay; 7805 mode->type &= ~DRM_MODE_TYPE_PREFERRED; 7806 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); 7807 7808 return mode; 7809 7810 } 7811 7812 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, 7813 struct drm_connector *connector) 7814 { 7815 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 7816 struct drm_display_mode *mode = NULL; 7817 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; 7818 struct amdgpu_dm_connector *amdgpu_dm_connector = 7819 to_amdgpu_dm_connector(connector); 7820 int i; 7821 int n; 7822 struct mode_size { 7823 char name[DRM_DISPLAY_MODE_LEN]; 7824 int w; 7825 int h; 7826 } common_modes[] = { 7827 { "640x480", 640, 480}, 7828 { "800x600", 800, 600}, 7829 { "1024x768", 1024, 768}, 7830 { "1280x720", 1280, 720}, 7831 { "1280x800", 1280, 800}, 7832 {"1280x1024", 1280, 1024}, 7833 { "1440x900", 1440, 900}, 7834 {"1680x1050", 1680, 1050}, 7835 {"1600x1200", 1600, 1200}, 7836 {"1920x1080", 1920, 1080}, 7837 {"1920x1200", 1920, 1200} 7838 }; 7839 7840 n = ARRAY_SIZE(common_modes); 7841 7842 for (i = 0; i < n; i++) { 7843 struct drm_display_mode *curmode = NULL; 7844 bool mode_existed = false; 7845 7846 if (common_modes[i].w > native_mode->hdisplay || 7847 common_modes[i].h > native_mode->vdisplay || 7848 (common_modes[i].w == native_mode->hdisplay && 7849 common_modes[i].h == native_mode->vdisplay)) 7850 continue; 7851 7852 list_for_each_entry(curmode, &connector->probed_modes, head) { 7853 if (common_modes[i].w == curmode->hdisplay && 7854 common_modes[i].h == curmode->vdisplay) { 7855 mode_existed = true; 7856 break; 7857 } 7858 } 7859 7860 if (mode_existed) 7861 continue; 7862 7863 mode = amdgpu_dm_create_common_mode(encoder, 7864 common_modes[i].name, common_modes[i].w, 7865 common_modes[i].h); 7866 drm_mode_probed_add(connector, mode); 7867 amdgpu_dm_connector->num_modes++; 7868 } 7869 } 7870 7871 static void amdgpu_set_panel_orientation(struct drm_connector *connector) 7872 { 7873 struct drm_encoder *encoder; 7874 struct amdgpu_encoder *amdgpu_encoder; 7875 const struct 
drm_display_mode *native_mode; 7876 7877 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && 7878 connector->connector_type != DRM_MODE_CONNECTOR_LVDS) 7879 return; 7880 7881 encoder = amdgpu_dm_connector_to_encoder(connector); 7882 if (!encoder) 7883 return; 7884 7885 amdgpu_encoder = to_amdgpu_encoder(encoder); 7886 7887 native_mode = &amdgpu_encoder->native_mode; 7888 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) 7889 return; 7890 7891 drm_connector_set_panel_orientation_with_quirk(connector, 7892 DRM_MODE_PANEL_ORIENTATION_UNKNOWN, 7893 native_mode->hdisplay, 7894 native_mode->vdisplay); 7895 } 7896 7897 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, 7898 struct edid *edid) 7899 { 7900 struct amdgpu_dm_connector *amdgpu_dm_connector = 7901 to_amdgpu_dm_connector(connector); 7902 7903 if (edid) { 7904 /* empty probed_modes */ 7905 INIT_LIST_HEAD(&connector->probed_modes); 7906 amdgpu_dm_connector->num_modes = 7907 drm_add_edid_modes(connector, edid); 7908 7909 /* sorting the probed modes before calling function 7910 * amdgpu_dm_get_native_mode() since EDID can have 7911 * more than one preferred mode. The modes that are 7912 * later in the probed mode list could be of higher 7913 * and preferred resolution. For example, 3840x2160 7914 * resolution in base EDID preferred timing and 4096x2160 7915 * preferred resolution in DID extension block later. 7916 */ 7917 drm_mode_sort(&connector->probed_modes); 7918 amdgpu_dm_get_native_mode(connector); 7919 7920 /* Freesync capabilities are reset by calling 7921 * drm_add_edid_modes() and need to be 7922 * restored here. 7923 */ 7924 amdgpu_dm_update_freesync_caps(connector, edid); 7925 7926 amdgpu_set_panel_orientation(connector); 7927 } else { 7928 amdgpu_dm_connector->num_modes = 0; 7929 } 7930 } 7931 7932 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, 7933 struct drm_display_mode *mode) 7934 { 7935 struct drm_display_mode *m; 7936 7937 list_for_each_entry (m, &aconnector->base.probed_modes, head) { 7938 if (drm_mode_equal(m, mode)) 7939 return true; 7940 } 7941 7942 return false; 7943 } 7944 7945 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) 7946 { 7947 const struct drm_display_mode *m; 7948 struct drm_display_mode *new_mode; 7949 uint i; 7950 uint32_t new_modes_count = 0; 7951 7952 /* Standard FPS values 7953 * 7954 * 23.976 - TV/NTSC 7955 * 24 - Cinema 7956 * 25 - TV/PAL 7957 * 29.97 - TV/NTSC 7958 * 30 - TV/NTSC 7959 * 48 - Cinema HFR 7960 * 50 - TV/PAL 7961 * 60 - Commonly used 7962 * 48,72,96 - Multiples of 24 7963 */ 7964 static const uint32_t common_rates[] = { 7965 23976, 24000, 25000, 29970, 30000, 7966 48000, 50000, 60000, 72000, 96000 7967 }; 7968 7969 /* 7970 * Find mode with highest refresh rate with the same resolution 7971 * as the preferred mode. Some monitors report a preferred mode 7972 * with lower resolution than the highest refresh rate supported. 
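 *
 * Each synthesized mode keeps the base mode's pixel clock and htotal and
 * only stretches the vertical front porch to hit the target rate. As a
 * rough example, for standard 1080p60 CEA timing (clock 148500 kHz,
 * htotal 2200, vtotal 1125) a 48 Hz target gives
 *   target_vtotal = 148500 * 1000 * 1000 / (48000 * 2200) = 1406,
 * so vsync_start, vsync_end and vtotal each grow by 281 lines.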
7973 */ 7974 7975 m = get_highest_refresh_rate_mode(aconnector, true); 7976 if (!m) 7977 return 0; 7978 7979 for (i = 0; i < ARRAY_SIZE(common_rates); i++) { 7980 uint64_t target_vtotal, target_vtotal_diff; 7981 uint64_t num, den; 7982 7983 if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) 7984 continue; 7985 7986 if (common_rates[i] < aconnector->min_vfreq * 1000 || 7987 common_rates[i] > aconnector->max_vfreq * 1000) 7988 continue; 7989 7990 num = (unsigned long long)m->clock * 1000 * 1000; 7991 den = common_rates[i] * (unsigned long long)m->htotal; 7992 target_vtotal = div_u64(num, den); 7993 target_vtotal_diff = target_vtotal - m->vtotal; 7994 7995 /* Check for illegal modes */ 7996 if (m->vsync_start + target_vtotal_diff < m->vdisplay || 7997 m->vsync_end + target_vtotal_diff < m->vsync_start || 7998 m->vtotal + target_vtotal_diff < m->vsync_end) 7999 continue; 8000 8001 new_mode = drm_mode_duplicate(aconnector->base.dev, m); 8002 if (!new_mode) 8003 goto out; 8004 8005 new_mode->vtotal += (u16)target_vtotal_diff; 8006 new_mode->vsync_start += (u16)target_vtotal_diff; 8007 new_mode->vsync_end += (u16)target_vtotal_diff; 8008 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; 8009 new_mode->type |= DRM_MODE_TYPE_DRIVER; 8010 8011 if (!is_duplicate_mode(aconnector, new_mode)) { 8012 drm_mode_probed_add(&aconnector->base, new_mode); 8013 new_modes_count += 1; 8014 } else 8015 drm_mode_destroy(aconnector->base.dev, new_mode); 8016 } 8017 out: 8018 return new_modes_count; 8019 } 8020 8021 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, 8022 struct edid *edid) 8023 { 8024 struct amdgpu_dm_connector *amdgpu_dm_connector = 8025 to_amdgpu_dm_connector(connector); 8026 8027 if (!(amdgpu_freesync_vid_mode && edid)) 8028 return; 8029 8030 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) 8031 amdgpu_dm_connector->num_modes += 8032 add_fs_modes(amdgpu_dm_connector); 8033 } 8034 8035 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) 8036 { 8037 struct amdgpu_dm_connector *amdgpu_dm_connector = 8038 to_amdgpu_dm_connector(connector); 8039 struct drm_encoder *encoder; 8040 struct edid *edid = amdgpu_dm_connector->edid; 8041 8042 encoder = amdgpu_dm_connector_to_encoder(connector); 8043 8044 if (!drm_edid_is_valid(edid)) { 8045 amdgpu_dm_connector->num_modes = 8046 drm_add_modes_noedid(connector, 640, 480); 8047 } else { 8048 amdgpu_dm_connector_ddc_get_modes(connector, edid); 8049 amdgpu_dm_connector_add_common_modes(encoder, connector); 8050 amdgpu_dm_connector_add_freesync_modes(connector, edid); 8051 } 8052 amdgpu_dm_fbc_init(connector); 8053 8054 return amdgpu_dm_connector->num_modes; 8055 } 8056 8057 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, 8058 struct amdgpu_dm_connector *aconnector, 8059 int connector_type, 8060 struct dc_link *link, 8061 int link_index) 8062 { 8063 struct amdgpu_device *adev = drm_to_adev(dm->ddev); 8064 8065 /* 8066 * Some of the properties below require access to state, like bpc. 8067 * Allocate some default initial connector state with our reset helper. 
8068 */ 8069 if (aconnector->base.funcs->reset) 8070 aconnector->base.funcs->reset(&aconnector->base); 8071 8072 aconnector->connector_id = link_index; 8073 aconnector->dc_link = link; 8074 aconnector->base.interlace_allowed = false; 8075 aconnector->base.doublescan_allowed = false; 8076 aconnector->base.stereo_allowed = false; 8077 aconnector->base.dpms = DRM_MODE_DPMS_OFF; 8078 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ 8079 aconnector->audio_inst = -1; 8080 mutex_init(&aconnector->hpd_lock); 8081 8082 /* 8083 * configure support HPD hot plug connector_>polled default value is 0 8084 * which means HPD hot plug not supported 8085 */ 8086 switch (connector_type) { 8087 case DRM_MODE_CONNECTOR_HDMIA: 8088 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8089 aconnector->base.ycbcr_420_allowed = 8090 link->link_enc->features.hdmi_ycbcr420_supported ? true : false; 8091 break; 8092 case DRM_MODE_CONNECTOR_DisplayPort: 8093 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8094 aconnector->base.ycbcr_420_allowed = 8095 link->link_enc->features.dp_ycbcr420_supported ? true : false; 8096 break; 8097 case DRM_MODE_CONNECTOR_DVID: 8098 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; 8099 break; 8100 default: 8101 break; 8102 } 8103 8104 drm_object_attach_property(&aconnector->base.base, 8105 dm->ddev->mode_config.scaling_mode_property, 8106 DRM_MODE_SCALE_NONE); 8107 8108 drm_object_attach_property(&aconnector->base.base, 8109 adev->mode_info.underscan_property, 8110 UNDERSCAN_OFF); 8111 drm_object_attach_property(&aconnector->base.base, 8112 adev->mode_info.underscan_hborder_property, 8113 0); 8114 drm_object_attach_property(&aconnector->base.base, 8115 adev->mode_info.underscan_vborder_property, 8116 0); 8117 8118 if (!aconnector->mst_port) 8119 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 8120 8121 /* This defaults to the max in the range, but we want 8bpc for non-edp. */ 8122 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 
16 : 8; 8123 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 8124 8125 if (connector_type == DRM_MODE_CONNECTOR_eDP && 8126 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { 8127 drm_object_attach_property(&aconnector->base.base, 8128 adev->mode_info.abm_level_property, 0); 8129 } 8130 8131 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 8132 connector_type == DRM_MODE_CONNECTOR_DisplayPort || 8133 connector_type == DRM_MODE_CONNECTOR_eDP) { 8134 drm_connector_attach_hdr_output_metadata_property(&aconnector->base); 8135 8136 if (!aconnector->mst_port) 8137 drm_connector_attach_vrr_capable_property(&aconnector->base); 8138 8139 #ifdef CONFIG_DRM_AMD_DC_HDCP 8140 if (adev->dm.hdcp_workqueue) 8141 drm_connector_attach_content_protection_property(&aconnector->base, true); 8142 #endif 8143 } 8144 } 8145 8146 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, 8147 struct i2c_msg *msgs, int num) 8148 { 8149 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); 8150 struct ddc_service *ddc_service = i2c->ddc_service; 8151 struct i2c_command cmd; 8152 int i; 8153 int result = -EIO; 8154 8155 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); 8156 8157 if (!cmd.payloads) 8158 return result; 8159 8160 cmd.number_of_payloads = num; 8161 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; 8162 cmd.speed = 100; 8163 8164 for (i = 0; i < num; i++) { 8165 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); 8166 cmd.payloads[i].address = msgs[i].addr; 8167 cmd.payloads[i].length = msgs[i].len; 8168 cmd.payloads[i].data = msgs[i].buf; 8169 } 8170 8171 if (dc_submit_i2c( 8172 ddc_service->ctx->dc, 8173 ddc_service->ddc_pin->hw_info.ddc_channel, 8174 &cmd)) 8175 result = num; 8176 8177 kfree(cmd.payloads); 8178 return result; 8179 } 8180 8181 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) 8182 { 8183 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 8184 } 8185 8186 static const struct i2c_algorithm amdgpu_dm_i2c_algo = { 8187 .master_xfer = amdgpu_dm_i2c_xfer, 8188 .functionality = amdgpu_dm_i2c_func, 8189 }; 8190 8191 static struct amdgpu_i2c_adapter * 8192 create_i2c(struct ddc_service *ddc_service, 8193 int link_index, 8194 int *res) 8195 { 8196 struct amdgpu_device *adev = ddc_service->ctx->driver_context; 8197 struct amdgpu_i2c_adapter *i2c; 8198 8199 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); 8200 if (!i2c) 8201 return NULL; 8202 i2c->base.owner = THIS_MODULE; 8203 i2c->base.class = I2C_CLASS_DDC; 8204 i2c->base.dev.parent = &adev->pdev->dev; 8205 i2c->base.algo = &amdgpu_dm_i2c_algo; 8206 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); 8207 i2c_set_adapdata(&i2c->base, i2c); 8208 i2c->ddc_service = ddc_service; 8209 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index; 8210 8211 return i2c; 8212 } 8213 8214 8215 /* 8216 * Note: this function assumes that dc_link_detect() was called for the 8217 * dc_link which will be represented by this aconnector. 
8218 */ 8219 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, 8220 struct amdgpu_dm_connector *aconnector, 8221 uint32_t link_index, 8222 struct amdgpu_encoder *aencoder) 8223 { 8224 int res = 0; 8225 int connector_type; 8226 struct dc *dc = dm->dc; 8227 struct dc_link *link = dc_get_link_at_index(dc, link_index); 8228 struct amdgpu_i2c_adapter *i2c; 8229 8230 link->priv = aconnector; 8231 8232 DRM_DEBUG_DRIVER("%s()\n", __func__); 8233 8234 i2c = create_i2c(link->ddc, link->link_index, &res); 8235 if (!i2c) { 8236 DRM_ERROR("Failed to create i2c adapter data\n"); 8237 return -ENOMEM; 8238 } 8239 8240 aconnector->i2c = i2c; 8241 res = i2c_add_adapter(&i2c->base); 8242 8243 if (res) { 8244 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); 8245 goto out_free; 8246 } 8247 8248 connector_type = to_drm_connector_type(link->connector_signal); 8249 8250 res = drm_connector_init_with_ddc( 8251 dm->ddev, 8252 &aconnector->base, 8253 &amdgpu_dm_connector_funcs, 8254 connector_type, 8255 &i2c->base); 8256 8257 if (res) { 8258 DRM_ERROR("connector_init failed\n"); 8259 aconnector->connector_id = -1; 8260 goto out_free; 8261 } 8262 8263 drm_connector_helper_add( 8264 &aconnector->base, 8265 &amdgpu_dm_connector_helper_funcs); 8266 8267 amdgpu_dm_connector_init_helper( 8268 dm, 8269 aconnector, 8270 connector_type, 8271 link, 8272 link_index); 8273 8274 drm_connector_attach_encoder( 8275 &aconnector->base, &aencoder->base); 8276 8277 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort 8278 || connector_type == DRM_MODE_CONNECTOR_eDP) 8279 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); 8280 8281 out_free: 8282 if (res) { 8283 kfree(i2c); 8284 aconnector->i2c = NULL; 8285 } 8286 return res; 8287 } 8288 8289 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) 8290 { 8291 switch (adev->mode_info.num_crtc) { 8292 case 1: 8293 return 0x1; 8294 case 2: 8295 return 0x3; 8296 case 3: 8297 return 0x7; 8298 case 4: 8299 return 0xf; 8300 case 5: 8301 return 0x1f; 8302 case 6: 8303 default: 8304 return 0x3f; 8305 } 8306 } 8307 8308 static int amdgpu_dm_encoder_init(struct drm_device *dev, 8309 struct amdgpu_encoder *aencoder, 8310 uint32_t link_index) 8311 { 8312 struct amdgpu_device *adev = drm_to_adev(dev); 8313 8314 int res = drm_encoder_init(dev, 8315 &aencoder->base, 8316 &amdgpu_dm_encoder_funcs, 8317 DRM_MODE_ENCODER_TMDS, 8318 NULL); 8319 8320 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); 8321 8322 if (!res) 8323 aencoder->encoder_id = link_index; 8324 else 8325 aencoder->encoder_id = -1; 8326 8327 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); 8328 8329 return res; 8330 } 8331 8332 static void manage_dm_interrupts(struct amdgpu_device *adev, 8333 struct amdgpu_crtc *acrtc, 8334 bool enable) 8335 { 8336 /* 8337 * We have no guarantee that the frontend index maps to the same 8338 * backend index - some even map to more than one. 8339 * 8340 * TODO: Use a different interrupt or check DC itself for the mapping. 
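 *
 * Ordering note: the enable path turns vblank handling on before taking
 * the pageflip (and, with CONFIG_DRM_AMD_SECURE_DISPLAY, vline0)
 * interrupt references; the disable path drops those references in
 * reverse order before turning vblank off.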
8341 */ 8342 int irq_type = 8343 amdgpu_display_crtc_idx_to_irq_type( 8344 adev, 8345 acrtc->crtc_id); 8346 8347 if (enable) { 8348 drm_crtc_vblank_on(&acrtc->base); 8349 amdgpu_irq_get( 8350 adev, 8351 &adev->pageflip_irq, 8352 irq_type); 8353 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8354 amdgpu_irq_get( 8355 adev, 8356 &adev->vline0_irq, 8357 irq_type); 8358 #endif 8359 } else { 8360 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8361 amdgpu_irq_put( 8362 adev, 8363 &adev->vline0_irq, 8364 irq_type); 8365 #endif 8366 amdgpu_irq_put( 8367 adev, 8368 &adev->pageflip_irq, 8369 irq_type); 8370 drm_crtc_vblank_off(&acrtc->base); 8371 } 8372 } 8373 8374 static void dm_update_pflip_irq_state(struct amdgpu_device *adev, 8375 struct amdgpu_crtc *acrtc) 8376 { 8377 int irq_type = 8378 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); 8379 8380 /** 8381 * This reads the current state for the IRQ and force reapplies 8382 * the setting to hardware. 8383 */ 8384 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); 8385 } 8386 8387 static bool 8388 is_scaling_state_different(const struct dm_connector_state *dm_state, 8389 const struct dm_connector_state *old_dm_state) 8390 { 8391 if (dm_state->scaling != old_dm_state->scaling) 8392 return true; 8393 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { 8394 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) 8395 return true; 8396 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { 8397 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) 8398 return true; 8399 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || 8400 dm_state->underscan_vborder != old_dm_state->underscan_vborder) 8401 return true; 8402 return false; 8403 } 8404 8405 #ifdef CONFIG_DRM_AMD_DC_HDCP 8406 static bool is_content_protection_different(struct drm_connector_state *state, 8407 const struct drm_connector_state *old_state, 8408 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) 8409 { 8410 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 8411 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 8412 8413 /* Handle: Type0/1 change */ 8414 if (old_state->hdcp_content_type != state->hdcp_content_type && 8415 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 8416 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8417 return true; 8418 } 8419 8420 /* CP is being re enabled, ignore this 8421 * 8422 * Handles: ENABLED -> DESIRED 8423 */ 8424 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && 8425 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8426 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; 8427 return false; 8428 } 8429 8430 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED 8431 * 8432 * Handles: UNDESIRED -> ENABLED 8433 */ 8434 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && 8435 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) 8436 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 8437 8438 /* Stream removed and re-enabled 8439 * 8440 * Can sometimes overlap with the HPD case, 8441 * thus set update_hdcp to false to avoid 8442 * setting HDCP multiple times. 
8443 * 8444 * Handles: DESIRED -> DESIRED (Special case) 8445 */ 8446 if (!(old_state->crtc && old_state->crtc->enabled) && 8447 state->crtc && state->crtc->enabled && 8448 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { 8449 dm_con_state->update_hdcp = false; 8450 return true; 8451 } 8452 8453 /* Hot-plug, headless s3, dpms 8454 * 8455 * Only start HDCP if the display is connected/enabled. 8456 * update_hdcp flag will be set to false until the next 8457 * HPD comes in. 8458 * 8459 * Handles: DESIRED -> DESIRED (Special case) 8460 */ 8461 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && 8462 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { 8463 dm_con_state->update_hdcp = false; 8464 return true; 8465 } 8466 8467 /* 8468 * Handles: UNDESIRED -> UNDESIRED 8469 * DESIRED -> DESIRED 8470 * ENABLED -> ENABLED 8471 */ 8472 if (old_state->content_protection == state->content_protection) 8473 return false; 8474 8475 /* 8476 * Handles: UNDESIRED -> DESIRED 8477 * DESIRED -> UNDESIRED 8478 * ENABLED -> UNDESIRED 8479 */ 8480 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) 8481 return true; 8482 8483 /* 8484 * Handles: DESIRED -> ENABLED 8485 */ 8486 return false; 8487 } 8488 8489 #endif 8490 static void remove_stream(struct amdgpu_device *adev, 8491 struct amdgpu_crtc *acrtc, 8492 struct dc_stream_state *stream) 8493 { 8494 /* this is the update mode case */ 8495 8496 acrtc->otg_inst = -1; 8497 acrtc->enabled = false; 8498 } 8499 8500 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, 8501 struct dc_cursor_position *position) 8502 { 8503 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8504 int x, y; 8505 int xorigin = 0, yorigin = 0; 8506 8507 if (!crtc || !plane->state->fb) 8508 return 0; 8509 8510 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || 8511 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { 8512 DRM_ERROR("%s: bad cursor width or height %d x %d\n", 8513 __func__, 8514 plane->state->crtc_w, 8515 plane->state->crtc_h); 8516 return -EINVAL; 8517 } 8518 8519 x = plane->state->crtc_x; 8520 y = plane->state->crtc_y; 8521 8522 if (x <= -amdgpu_crtc->max_cursor_width || 8523 y <= -amdgpu_crtc->max_cursor_height) 8524 return 0; 8525 8526 if (x < 0) { 8527 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 8528 x = 0; 8529 } 8530 if (y < 0) { 8531 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 8532 y = 0; 8533 } 8534 position->enable = true; 8535 position->translate_by_source = true; 8536 position->x = x; 8537 position->y = y; 8538 position->x_hotspot = xorigin; 8539 position->y_hotspot = yorigin; 8540 8541 return 0; 8542 } 8543 8544 static void handle_cursor_update(struct drm_plane *plane, 8545 struct drm_plane_state *old_plane_state) 8546 { 8547 struct amdgpu_device *adev = drm_to_adev(plane->dev); 8548 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); 8549 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; 8550 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; 8551 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 8552 uint64_t address = afb ? 
afb->address : 0; 8553 struct dc_cursor_position position = {0}; 8554 struct dc_cursor_attributes attributes; 8555 int ret; 8556 8557 if (!plane->state->fb && !old_plane_state->fb) 8558 return; 8559 8560 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", 8561 __func__, 8562 amdgpu_crtc->crtc_id, 8563 plane->state->crtc_w, 8564 plane->state->crtc_h); 8565 8566 ret = get_cursor_position(plane, crtc, &position); 8567 if (ret) 8568 return; 8569 8570 if (!position.enable) { 8571 /* turn off cursor */ 8572 if (crtc_state && crtc_state->stream) { 8573 mutex_lock(&adev->dm.dc_lock); 8574 dc_stream_set_cursor_position(crtc_state->stream, 8575 &position); 8576 mutex_unlock(&adev->dm.dc_lock); 8577 } 8578 return; 8579 } 8580 8581 amdgpu_crtc->cursor_width = plane->state->crtc_w; 8582 amdgpu_crtc->cursor_height = plane->state->crtc_h; 8583 8584 memset(&attributes, 0, sizeof(attributes)); 8585 attributes.address.high_part = upper_32_bits(address); 8586 attributes.address.low_part = lower_32_bits(address); 8587 attributes.width = plane->state->crtc_w; 8588 attributes.height = plane->state->crtc_h; 8589 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; 8590 attributes.rotation_angle = 0; 8591 attributes.attribute_flags.value = 0; 8592 8593 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; 8594 8595 if (crtc_state->stream) { 8596 mutex_lock(&adev->dm.dc_lock); 8597 if (!dc_stream_set_cursor_attributes(crtc_state->stream, 8598 &attributes)) 8599 DRM_ERROR("DC failed to set cursor attributes\n"); 8600 8601 if (!dc_stream_set_cursor_position(crtc_state->stream, 8602 &position)) 8603 DRM_ERROR("DC failed to set cursor position\n"); 8604 mutex_unlock(&adev->dm.dc_lock); 8605 } 8606 } 8607 8608 static void prepare_flip_isr(struct amdgpu_crtc *acrtc) 8609 { 8610 8611 assert_spin_locked(&acrtc->base.dev->event_lock); 8612 WARN_ON(acrtc->event); 8613 8614 acrtc->event = acrtc->base.state->event; 8615 8616 /* Set the flip status */ 8617 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 8618 8619 /* Mark this event as consumed */ 8620 acrtc->base.state->event = NULL; 8621 8622 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", 8623 acrtc->crtc_id); 8624 } 8625 8626 static void update_freesync_state_on_stream( 8627 struct amdgpu_display_manager *dm, 8628 struct dm_crtc_state *new_crtc_state, 8629 struct dc_stream_state *new_stream, 8630 struct dc_plane_state *surface, 8631 u32 flip_timestamp_in_us) 8632 { 8633 struct mod_vrr_params vrr_params; 8634 struct dc_info_packet vrr_infopacket = {0}; 8635 struct amdgpu_device *adev = dm->adev; 8636 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8637 unsigned long flags; 8638 bool pack_sdp_v1_3 = false; 8639 8640 if (!new_stream) 8641 return; 8642 8643 /* 8644 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8645 * For now it's sufficient to just guard against these conditions. 8646 */ 8647 8648 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8649 return; 8650 8651 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8652 vrr_params = acrtc->dm_irq_params.vrr_params; 8653 8654 if (surface) { 8655 mod_freesync_handle_preflip( 8656 dm->freesync_module, 8657 surface, 8658 new_stream, 8659 flip_timestamp_in_us, 8660 &vrr_params); 8661 8662 if (adev->family < AMDGPU_FAMILY_AI && 8663 amdgpu_dm_vrr_active(new_crtc_state)) { 8664 mod_freesync_handle_v_update(dm->freesync_module, 8665 new_stream, &vrr_params); 8666 8667 /* Need to call this before the frame ends. 
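 * mod_freesync_handle_v_update() above may have just revised
 * vrr_params.adjust; pushing it to DC now, rather than after the frame
 * ends, keeps the new vmin/vmax from slipping to the following frame.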
*/ 8668 dc_stream_adjust_vmin_vmax(dm->dc, 8669 new_crtc_state->stream, 8670 &vrr_params.adjust); 8671 } 8672 } 8673 8674 mod_freesync_build_vrr_infopacket( 8675 dm->freesync_module, 8676 new_stream, 8677 &vrr_params, 8678 PACKET_TYPE_VRR, 8679 TRANSFER_FUNC_UNKNOWN, 8680 &vrr_infopacket, 8681 pack_sdp_v1_3); 8682 8683 new_crtc_state->freesync_timing_changed |= 8684 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 8685 &vrr_params.adjust, 8686 sizeof(vrr_params.adjust)) != 0); 8687 8688 new_crtc_state->freesync_vrr_info_changed |= 8689 (memcmp(&new_crtc_state->vrr_infopacket, 8690 &vrr_infopacket, 8691 sizeof(vrr_infopacket)) != 0); 8692 8693 acrtc->dm_irq_params.vrr_params = vrr_params; 8694 new_crtc_state->vrr_infopacket = vrr_infopacket; 8695 8696 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust; 8697 new_stream->vrr_infopacket = vrr_infopacket; 8698 8699 if (new_crtc_state->freesync_vrr_info_changed) 8700 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 8701 new_crtc_state->base.crtc->base.id, 8702 (int)new_crtc_state->base.vrr_enabled, 8703 (int)vrr_params.state); 8704 8705 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8706 } 8707 8708 static void update_stream_irq_parameters( 8709 struct amdgpu_display_manager *dm, 8710 struct dm_crtc_state *new_crtc_state) 8711 { 8712 struct dc_stream_state *new_stream = new_crtc_state->stream; 8713 struct mod_vrr_params vrr_params; 8714 struct mod_freesync_config config = new_crtc_state->freesync_config; 8715 struct amdgpu_device *adev = dm->adev; 8716 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); 8717 unsigned long flags; 8718 8719 if (!new_stream) 8720 return; 8721 8722 /* 8723 * TODO: Determine why min/max totals and vrefresh can be 0 here. 8724 * For now it's sufficient to just guard against these conditions. 8725 */ 8726 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 8727 return; 8728 8729 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 8730 vrr_params = acrtc->dm_irq_params.vrr_params; 8731 8732 if (new_crtc_state->vrr_supported && 8733 config.min_refresh_in_uhz && 8734 config.max_refresh_in_uhz) { 8735 /* 8736 * if freesync compatible mode was set, config.state will be set 8737 * in atomic check 8738 */ 8739 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && 8740 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || 8741 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { 8742 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; 8743 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; 8744 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; 8745 vrr_params.state = VRR_STATE_ACTIVE_FIXED; 8746 } else { 8747 config.state = new_crtc_state->base.vrr_enabled ? 
8748 VRR_STATE_ACTIVE_VARIABLE : 8749 VRR_STATE_INACTIVE; 8750 } 8751 } else { 8752 config.state = VRR_STATE_UNSUPPORTED; 8753 } 8754 8755 mod_freesync_build_vrr_params(dm->freesync_module, 8756 new_stream, 8757 &config, &vrr_params); 8758 8759 new_crtc_state->freesync_timing_changed |= 8760 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, 8761 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); 8762 8763 new_crtc_state->freesync_config = config; 8764 /* Copy state for access from DM IRQ handler */ 8765 acrtc->dm_irq_params.freesync_config = config; 8766 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; 8767 acrtc->dm_irq_params.vrr_params = vrr_params; 8768 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 8769 } 8770 8771 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, 8772 struct dm_crtc_state *new_state) 8773 { 8774 bool old_vrr_active = amdgpu_dm_vrr_active(old_state); 8775 bool new_vrr_active = amdgpu_dm_vrr_active(new_state); 8776 8777 if (!old_vrr_active && new_vrr_active) { 8778 /* Transition VRR inactive -> active: 8779 * While VRR is active, we must not disable vblank irq, as a 8780 * reenable after disable would compute bogus vblank/pflip 8781 * timestamps if it likely happened inside display front-porch. 8782 * 8783 * We also need vupdate irq for the actual core vblank handling 8784 * at end of vblank. 8785 */ 8786 dm_set_vupdate_irq(new_state->base.crtc, true); 8787 drm_crtc_vblank_get(new_state->base.crtc); 8788 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", 8789 __func__, new_state->base.crtc->base.id); 8790 } else if (old_vrr_active && !new_vrr_active) { 8791 /* Transition VRR active -> inactive: 8792 * Allow vblank irq disable again for fixed refresh rate. 8793 */ 8794 dm_set_vupdate_irq(new_state->base.crtc, false); 8795 drm_crtc_vblank_put(new_state->base.crtc); 8796 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 8797 __func__, new_state->base.crtc->base.id); 8798 } 8799 } 8800 8801 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 8802 { 8803 struct drm_plane *plane; 8804 struct drm_plane_state *old_plane_state; 8805 int i; 8806 8807 /* 8808 * TODO: Make this per-stream so we don't issue redundant updates for 8809 * commits with multiple streams. 
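 *
 * As written, every cursor plane in the atomic state is reprogrammed
 * each time this helper runs, so commits that touch several streams
 * repeat the same handle_cursor_update() work.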
8810 */ 8811 for_each_old_plane_in_state(state, plane, old_plane_state, i) 8812 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8813 handle_cursor_update(plane, old_plane_state); 8814 } 8815 8816 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, 8817 struct dc_state *dc_state, 8818 struct drm_device *dev, 8819 struct amdgpu_display_manager *dm, 8820 struct drm_crtc *pcrtc, 8821 bool wait_for_vblank) 8822 { 8823 uint32_t i; 8824 uint64_t timestamp_ns; 8825 struct drm_plane *plane; 8826 struct drm_plane_state *old_plane_state, *new_plane_state; 8827 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); 8828 struct drm_crtc_state *new_pcrtc_state = 8829 drm_atomic_get_new_crtc_state(state, pcrtc); 8830 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); 8831 struct dm_crtc_state *dm_old_crtc_state = 8832 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); 8833 int planes_count = 0, vpos, hpos; 8834 long r; 8835 unsigned long flags; 8836 struct amdgpu_bo *abo; 8837 uint32_t target_vblank, last_flip_vblank; 8838 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); 8839 bool pflip_present = false; 8840 struct { 8841 struct dc_surface_update surface_updates[MAX_SURFACES]; 8842 struct dc_plane_info plane_infos[MAX_SURFACES]; 8843 struct dc_scaling_info scaling_infos[MAX_SURFACES]; 8844 struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 8845 struct dc_stream_update stream_update; 8846 } *bundle; 8847 8848 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 8849 8850 if (!bundle) { 8851 dm_error("Failed to allocate update bundle\n"); 8852 goto cleanup; 8853 } 8854 8855 /* 8856 * Disable the cursor first if we're disabling all the planes. 8857 * It'll remain on the screen after the planes are re-enabled 8858 * if we don't. 
8859 */ 8860 if (acrtc_state->active_planes == 0) 8861 amdgpu_dm_commit_cursors(state); 8862 8863 /* update planes when needed */ 8864 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 8865 struct drm_crtc *crtc = new_plane_state->crtc; 8866 struct drm_crtc_state *new_crtc_state; 8867 struct drm_framebuffer *fb = new_plane_state->fb; 8868 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; 8869 bool plane_needs_flip; 8870 struct dc_plane_state *dc_plane; 8871 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); 8872 8873 /* Cursor plane is handled after stream updates */ 8874 if (plane->type == DRM_PLANE_TYPE_CURSOR) 8875 continue; 8876 8877 if (!fb || !crtc || pcrtc != crtc) 8878 continue; 8879 8880 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 8881 if (!new_crtc_state->active) 8882 continue; 8883 8884 dc_plane = dm_new_plane_state->dc_state; 8885 8886 bundle->surface_updates[planes_count].surface = dc_plane; 8887 if (new_pcrtc_state->color_mgmt_changed) { 8888 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; 8889 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; 8890 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; 8891 } 8892 8893 fill_dc_scaling_info(new_plane_state, 8894 &bundle->scaling_infos[planes_count]); 8895 8896 bundle->surface_updates[planes_count].scaling_info = 8897 &bundle->scaling_infos[planes_count]; 8898 8899 plane_needs_flip = old_plane_state->fb && new_plane_state->fb; 8900 8901 pflip_present = pflip_present || plane_needs_flip; 8902 8903 if (!plane_needs_flip) { 8904 planes_count += 1; 8905 continue; 8906 } 8907 8908 abo = gem_to_amdgpu_bo(fb->obj[0]); 8909 8910 /* 8911 * Wait for all fences on this FB. Do limited wait to avoid 8912 * deadlock during GPU reset when this fence will not signal 8913 * but we hold reservation lock for the BO. 8914 */ 8915 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false, 8916 msecs_to_jiffies(5000)); 8917 if (unlikely(r <= 0)) 8918 DRM_ERROR("Waiting for fences timed out!"); 8919 8920 fill_dc_plane_info_and_addr( 8921 dm->adev, new_plane_state, 8922 afb->tiling_flags, 8923 &bundle->plane_infos[planes_count], 8924 &bundle->flip_addrs[planes_count].address, 8925 afb->tmz_surface, false); 8926 8927 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n", 8928 new_plane_state->plane->index, 8929 bundle->plane_infos[planes_count].dcc.enable); 8930 8931 bundle->surface_updates[planes_count].plane_info = 8932 &bundle->plane_infos[planes_count]; 8933 8934 /* 8935 * Only allow immediate flips for fast updates that don't 8936 * change FB pitch, DCC state, rotation or mirroing. 
8937 */ 8938 bundle->flip_addrs[planes_count].flip_immediate = 8939 crtc->state->async_flip && 8940 acrtc_state->update_type == UPDATE_TYPE_FAST; 8941 8942 timestamp_ns = ktime_get_ns(); 8943 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 8944 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count]; 8945 bundle->surface_updates[planes_count].surface = dc_plane; 8946 8947 if (!bundle->surface_updates[planes_count].surface) { 8948 DRM_ERROR("No surface for CRTC: id=%d\n", 8949 acrtc_attach->crtc_id); 8950 continue; 8951 } 8952 8953 if (plane == pcrtc->primary) 8954 update_freesync_state_on_stream( 8955 dm, 8956 acrtc_state, 8957 acrtc_state->stream, 8958 dc_plane, 8959 bundle->flip_addrs[planes_count].flip_timestamp_in_us); 8960 8961 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n", 8962 __func__, 8963 bundle->flip_addrs[planes_count].address.grph.addr.high_part, 8964 bundle->flip_addrs[planes_count].address.grph.addr.low_part); 8965 8966 planes_count += 1; 8967 8968 } 8969 8970 if (pflip_present) { 8971 if (!vrr_active) { 8972 /* Use old throttling in non-vrr fixed refresh rate mode 8973 * to keep flip scheduling based on target vblank counts 8974 * working in a backwards compatible way, e.g., for 8975 * clients using the GLX_OML_sync_control extension or 8976 * DRI3/Present extension with defined target_msc. 8977 */ 8978 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc); 8979 } 8980 else { 8981 /* For variable refresh rate mode only: 8982 * Get vblank of last completed flip to avoid > 1 vrr 8983 * flips per video frame by use of throttling, but allow 8984 * flip programming anywhere in the possibly large 8985 * variable vrr vblank interval for fine-grained flip 8986 * timing control and more opportunity to avoid stutter 8987 * on late submission of flips. 8988 */ 8989 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 8990 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; 8991 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 8992 } 8993 8994 target_vblank = last_flip_vblank + wait_for_vblank; 8995 8996 /* 8997 * Wait until we're out of the vertical blank period before the one 8998 * targeted by the flip 8999 */ 9000 while ((acrtc_attach->enabled && 9001 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, 9002 0, &vpos, &hpos, NULL, 9003 NULL, &pcrtc->hwmode) 9004 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == 9005 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && 9006 (int)(target_vblank - 9007 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { 9008 usleep_range(1000, 1100); 9009 } 9010 9011 /** 9012 * Prepare the flip event for the pageflip interrupt to handle. 9013 * 9014 * This only works in the case where we've already turned on the 9015 * appropriate hardware blocks (eg. HUBP) so in the transition case 9016 * from 0 -> n planes we have to skip a hardware generated event 9017 * and rely on sending it from software. 
9018 */ 9019 if (acrtc_attach->base.state->event && 9020 acrtc_state->active_planes > 0 && 9021 !acrtc_state->force_dpms_off) { 9022 drm_crtc_vblank_get(pcrtc); 9023 9024 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9025 9026 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); 9027 prepare_flip_isr(acrtc_attach); 9028 9029 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9030 } 9031 9032 if (acrtc_state->stream) { 9033 if (acrtc_state->freesync_vrr_info_changed) 9034 bundle->stream_update.vrr_infopacket = 9035 &acrtc_state->stream->vrr_infopacket; 9036 } 9037 } 9038 9039 /* Update the planes if changed or disable if we don't have any. */ 9040 if ((planes_count || acrtc_state->active_planes == 0) && 9041 acrtc_state->stream) { 9042 #if defined(CONFIG_DRM_AMD_DC_DCN) 9043 /* 9044 * If PSR or idle optimizations are enabled then flush out 9045 * any pending work before hardware programming. 9046 */ 9047 if (dm->vblank_control_workqueue) 9048 flush_workqueue(dm->vblank_control_workqueue); 9049 #endif 9050 9051 bundle->stream_update.stream = acrtc_state->stream; 9052 if (new_pcrtc_state->mode_changed) { 9053 bundle->stream_update.src = acrtc_state->stream->src; 9054 bundle->stream_update.dst = acrtc_state->stream->dst; 9055 } 9056 9057 if (new_pcrtc_state->color_mgmt_changed) { 9058 /* 9059 * TODO: This isn't fully correct since we've actually 9060 * already modified the stream in place. 9061 */ 9062 bundle->stream_update.gamut_remap = 9063 &acrtc_state->stream->gamut_remap_matrix; 9064 bundle->stream_update.output_csc_transform = 9065 &acrtc_state->stream->csc_color_matrix; 9066 bundle->stream_update.out_transfer_func = 9067 acrtc_state->stream->out_transfer_func; 9068 } 9069 9070 acrtc_state->stream->abm_level = acrtc_state->abm_level; 9071 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) 9072 bundle->stream_update.abm_level = &acrtc_state->abm_level; 9073 9074 /* 9075 * If FreeSync state on the stream has changed then we need to 9076 * re-adjust the min/max bounds now that DC doesn't handle this 9077 * as part of commit. 9078 */ 9079 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { 9080 spin_lock_irqsave(&pcrtc->dev->event_lock, flags); 9081 dc_stream_adjust_vmin_vmax( 9082 dm->dc, acrtc_state->stream, 9083 &acrtc_attach->dm_irq_params.vrr_params.adjust); 9084 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); 9085 } 9086 mutex_lock(&dm->dc_lock); 9087 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 9088 acrtc_state->stream->link->psr_settings.psr_allow_active) 9089 amdgpu_dm_psr_disable(acrtc_state->stream); 9090 9091 dc_commit_updates_for_stream(dm->dc, 9092 bundle->surface_updates, 9093 planes_count, 9094 acrtc_state->stream, 9095 &bundle->stream_update, 9096 dc_state); 9097 9098 /** 9099 * Enable or disable the interrupts on the backend. 9100 * 9101 * Most pipes are put into power gating when unused. 9102 * 9103 * When power gating is enabled on a pipe we lose the 9104 * interrupt enablement state when power gating is disabled. 9105 * 9106 * So we need to update the IRQ control state in hardware 9107 * whenever the pipe turns on (since it could be previously 9108 * power gated) or off (since some pipes can't be power gated 9109 * on some ASICs). 
9110 */ 9111 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) 9112 dm_update_pflip_irq_state(drm_to_adev(dev), 9113 acrtc_attach); 9114 9115 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && 9116 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && 9117 !acrtc_state->stream->link->psr_settings.psr_feature_enabled) 9118 amdgpu_dm_link_setup_psr(acrtc_state->stream); 9119 9120 /* Decrement skip count when PSR is enabled and we're doing fast updates. */ 9121 if (acrtc_state->update_type == UPDATE_TYPE_FAST && 9122 acrtc_state->stream->link->psr_settings.psr_feature_enabled) { 9123 struct amdgpu_dm_connector *aconn = 9124 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; 9125 9126 if (aconn->psr_skip_count > 0) 9127 aconn->psr_skip_count--; 9128 9129 /* Allow PSR when skip count is 0. */ 9130 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; 9131 } else { 9132 acrtc_attach->dm_irq_params.allow_psr_entry = false; 9133 } 9134 9135 mutex_unlock(&dm->dc_lock); 9136 } 9137 9138 /* 9139 * Update cursor state *after* programming all the planes. 9140 * This avoids redundant programming in the case where we're going 9141 * to be disabling a single plane - those pipes are being disabled. 9142 */ 9143 if (acrtc_state->active_planes) 9144 amdgpu_dm_commit_cursors(state); 9145 9146 cleanup: 9147 kfree(bundle); 9148 } 9149 9150 static void amdgpu_dm_commit_audio(struct drm_device *dev, 9151 struct drm_atomic_state *state) 9152 { 9153 struct amdgpu_device *adev = drm_to_adev(dev); 9154 struct amdgpu_dm_connector *aconnector; 9155 struct drm_connector *connector; 9156 struct drm_connector_state *old_con_state, *new_con_state; 9157 struct drm_crtc_state *new_crtc_state; 9158 struct dm_crtc_state *new_dm_crtc_state; 9159 const struct dc_stream_status *status; 9160 int i, inst; 9161 9162 /* Notify device removals. */ 9163 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9164 if (old_con_state->crtc != new_con_state->crtc) { 9165 /* CRTC changes require notification. */ 9166 goto notify; 9167 } 9168 9169 if (!new_con_state->crtc) 9170 continue; 9171 9172 new_crtc_state = drm_atomic_get_new_crtc_state( 9173 state, new_con_state->crtc); 9174 9175 if (!new_crtc_state) 9176 continue; 9177 9178 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9179 continue; 9180 9181 notify: 9182 aconnector = to_amdgpu_dm_connector(connector); 9183 9184 mutex_lock(&adev->dm.audio_lock); 9185 inst = aconnector->audio_inst; 9186 aconnector->audio_inst = -1; 9187 mutex_unlock(&adev->dm.audio_lock); 9188 9189 amdgpu_dm_audio_eld_notify(adev, inst); 9190 } 9191 9192 /* Notify audio device additions. 
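 * Only connectors whose CRTC goes through a modeset and ends up with an
 * active stream (and a valid stream status) are notified here.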
*/ 9193 for_each_new_connector_in_state(state, connector, new_con_state, i) { 9194 if (!new_con_state->crtc) 9195 continue; 9196 9197 new_crtc_state = drm_atomic_get_new_crtc_state( 9198 state, new_con_state->crtc); 9199 9200 if (!new_crtc_state) 9201 continue; 9202 9203 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9204 continue; 9205 9206 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 9207 if (!new_dm_crtc_state->stream) 9208 continue; 9209 9210 status = dc_stream_get_status(new_dm_crtc_state->stream); 9211 if (!status) 9212 continue; 9213 9214 aconnector = to_amdgpu_dm_connector(connector); 9215 9216 mutex_lock(&adev->dm.audio_lock); 9217 inst = status->audio_inst; 9218 aconnector->audio_inst = inst; 9219 mutex_unlock(&adev->dm.audio_lock); 9220 9221 amdgpu_dm_audio_eld_notify(adev, inst); 9222 } 9223 } 9224 9225 /* 9226 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC 9227 * @crtc_state: the DRM CRTC state 9228 * @stream_state: the DC stream state. 9229 * 9230 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring 9231 * a dc_stream_state's flags in sync with a drm_crtc_state's flags. 9232 */ 9233 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, 9234 struct dc_stream_state *stream_state) 9235 { 9236 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 9237 } 9238 9239 /** 9240 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. 9241 * @state: The atomic state to commit 9242 * 9243 * This will tell DC to commit the constructed DC state from atomic_check, 9244 * programming the hardware. Any failures here implies a hardware failure, since 9245 * atomic check should have filtered anything non-kosher. 9246 */ 9247 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) 9248 { 9249 struct drm_device *dev = state->dev; 9250 struct amdgpu_device *adev = drm_to_adev(dev); 9251 struct amdgpu_display_manager *dm = &adev->dm; 9252 struct dm_atomic_state *dm_state; 9253 struct dc_state *dc_state = NULL, *dc_state_temp = NULL; 9254 uint32_t i, j; 9255 struct drm_crtc *crtc; 9256 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 9257 unsigned long flags; 9258 bool wait_for_vblank = true; 9259 struct drm_connector *connector; 9260 struct drm_connector_state *old_con_state, *new_con_state; 9261 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9262 int crtc_disable_count = 0; 9263 bool mode_set_reset_required = false; 9264 9265 trace_amdgpu_dm_atomic_commit_tail_begin(state); 9266 9267 drm_atomic_helper_update_legacy_modeset_state(dev, state); 9268 9269 dm_state = dm_atomic_get_new_state(state); 9270 if (dm_state && dm_state->context) { 9271 dc_state = dm_state->context; 9272 } else { 9273 /* No state changes, retain current state. 
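 * Build a scratch copy of the current DC state so the commit below still
 * has a valid context to program; it is released at the end of commit_tail.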
*/ 9274 dc_state_temp = dc_create_state(dm->dc); 9275 ASSERT(dc_state_temp); 9276 dc_state = dc_state_temp; 9277 dc_resource_state_copy_construct_current(dm->dc, dc_state); 9278 } 9279 9280 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state, 9281 new_crtc_state, i) { 9282 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9283 9284 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9285 9286 if (old_crtc_state->active && 9287 (!new_crtc_state->active || 9288 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9289 manage_dm_interrupts(adev, acrtc, false); 9290 dc_stream_release(dm_old_crtc_state->stream); 9291 } 9292 } 9293 9294 drm_atomic_helper_calc_timestamping_constants(state); 9295 9296 /* update changed items */ 9297 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9298 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9299 9300 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9301 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9302 9303 DRM_DEBUG_ATOMIC( 9304 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 9305 "planes_changed:%d, mode_changed:%d,active_changed:%d," 9306 "connectors_changed:%d\n", 9307 acrtc->crtc_id, 9308 new_crtc_state->enable, 9309 new_crtc_state->active, 9310 new_crtc_state->planes_changed, 9311 new_crtc_state->mode_changed, 9312 new_crtc_state->active_changed, 9313 new_crtc_state->connectors_changed); 9314 9315 /* Disable cursor if disabling crtc */ 9316 if (old_crtc_state->active && !new_crtc_state->active) { 9317 struct dc_cursor_position position; 9318 9319 memset(&position, 0, sizeof(position)); 9320 mutex_lock(&dm->dc_lock); 9321 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position); 9322 mutex_unlock(&dm->dc_lock); 9323 } 9324 9325 /* Copy all transient state flags into dc state */ 9326 if (dm_new_crtc_state->stream) { 9327 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base, 9328 dm_new_crtc_state->stream); 9329 } 9330 9331 /* handles headless hotplug case, updating new_state and 9332 * aconnector as needed 9333 */ 9334 9335 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { 9336 9337 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); 9338 9339 if (!dm_new_crtc_state->stream) { 9340 /* 9341 * this could happen because of issues with 9342 * userspace notifications delivery. 9343 * In this case userspace tries to set mode on 9344 * display which is disconnected in fact. 9345 * dc_sink is NULL in this case on aconnector. 9346 * We expect reset mode will come soon. 9347 * 9348 * This can also happen when unplug is done 9349 * during resume sequence ended 9350 * 9351 * In this case, we want to pretend we still 9352 * have a sink to keep the pipe running so that 9353 * hw state is consistent with the sw state 9354 */ 9355 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 9356 __func__, acrtc->base.base.id); 9357 continue; 9358 } 9359 9360 if (dm_old_crtc_state->stream) 9361 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9362 9363 pm_runtime_get_noresume(dev->dev); 9364 9365 acrtc->enabled = true; 9366 acrtc->hw_mode = new_crtc_state->mode; 9367 crtc->hwmode = new_crtc_state->mode; 9368 mode_set_reset_required = true; 9369 } else if (modereset_required(new_crtc_state)) { 9370 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); 9371 /* i.e. 
reset mode */ 9372 if (dm_old_crtc_state->stream) 9373 remove_stream(adev, acrtc, dm_old_crtc_state->stream); 9374 9375 mode_set_reset_required = true; 9376 } 9377 } /* for_each_crtc_in_state() */ 9378 9379 if (dc_state) { 9380 /* if there mode set or reset, disable eDP PSR */ 9381 if (mode_set_reset_required) { 9382 #if defined(CONFIG_DRM_AMD_DC_DCN) 9383 if (dm->vblank_control_workqueue) 9384 flush_workqueue(dm->vblank_control_workqueue); 9385 #endif 9386 amdgpu_dm_psr_disable_all(dm); 9387 } 9388 9389 dm_enable_per_frame_crtc_master_sync(dc_state); 9390 mutex_lock(&dm->dc_lock); 9391 WARN_ON(!dc_commit_state(dm->dc, dc_state)); 9392 #if defined(CONFIG_DRM_AMD_DC_DCN) 9393 /* Allow idle optimization when vblank count is 0 for display off */ 9394 if (dm->active_vblank_irq_count == 0) 9395 dc_allow_idle_optimizations(dm->dc,true); 9396 #endif 9397 mutex_unlock(&dm->dc_lock); 9398 } 9399 9400 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9401 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9402 9403 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9404 9405 if (dm_new_crtc_state->stream != NULL) { 9406 const struct dc_stream_status *status = 9407 dc_stream_get_status(dm_new_crtc_state->stream); 9408 9409 if (!status) 9410 status = dc_stream_get_status_from_state(dc_state, 9411 dm_new_crtc_state->stream); 9412 if (!status) 9413 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); 9414 else 9415 acrtc->otg_inst = status->primary_otg_inst; 9416 } 9417 } 9418 #ifdef CONFIG_DRM_AMD_DC_HDCP 9419 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9420 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9421 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9422 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9423 9424 new_crtc_state = NULL; 9425 9426 if (acrtc) 9427 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9428 9429 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9430 9431 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && 9432 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 9433 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); 9434 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; 9435 dm_new_con_state->update_hdcp = true; 9436 continue; 9437 } 9438 9439 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) 9440 hdcp_update_display( 9441 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, 9442 new_con_state->hdcp_content_type, 9443 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); 9444 } 9445 #endif 9446 9447 /* Handle connector state changes */ 9448 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 9449 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 9450 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 9451 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); 9452 struct dc_surface_update dummy_updates[MAX_SURFACES]; 9453 struct dc_stream_update stream_update; 9454 struct dc_info_packet hdr_packet; 9455 struct dc_stream_status *status = NULL; 9456 bool abm_changed, hdr_changed, scaling_changed; 9457 9458 memset(&dummy_updates, 0, sizeof(dummy_updates)); 9459 memset(&stream_update, 
0, sizeof(stream_update)); 9460 9461 if (acrtc) { 9462 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); 9463 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); 9464 } 9465 9466 /* Skip any modesets/resets */ 9467 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) 9468 continue; 9469 9470 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9471 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9472 9473 scaling_changed = is_scaling_state_different(dm_new_con_state, 9474 dm_old_con_state); 9475 9476 abm_changed = dm_new_crtc_state->abm_level != 9477 dm_old_crtc_state->abm_level; 9478 9479 hdr_changed = 9480 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state); 9481 9482 if (!scaling_changed && !abm_changed && !hdr_changed) 9483 continue; 9484 9485 stream_update.stream = dm_new_crtc_state->stream; 9486 if (scaling_changed) { 9487 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 9488 dm_new_con_state, dm_new_crtc_state->stream); 9489 9490 stream_update.src = dm_new_crtc_state->stream->src; 9491 stream_update.dst = dm_new_crtc_state->stream->dst; 9492 } 9493 9494 if (abm_changed) { 9495 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; 9496 9497 stream_update.abm_level = &dm_new_crtc_state->abm_level; 9498 } 9499 9500 if (hdr_changed) { 9501 fill_hdr_info_packet(new_con_state, &hdr_packet); 9502 stream_update.hdr_static_metadata = &hdr_packet; 9503 } 9504 9505 status = dc_stream_get_status(dm_new_crtc_state->stream); 9506 9507 if (WARN_ON(!status)) 9508 continue; 9509 9510 WARN_ON(!status->plane_count); 9511 9512 /* 9513 * TODO: DC refuses to perform stream updates without a dc_surface_update. 9514 * Here we create an empty update on each plane. 9515 * To fix this, DC should permit updating only stream properties. 9516 */ 9517 for (j = 0; j < status->plane_count; j++) 9518 dummy_updates[j].surface = status->plane_states[0]; 9519 9520 9521 mutex_lock(&dm->dc_lock); 9522 dc_commit_updates_for_stream(dm->dc, 9523 dummy_updates, 9524 status->plane_count, 9525 dm_new_crtc_state->stream, 9526 &stream_update, 9527 dc_state); 9528 mutex_unlock(&dm->dc_lock); 9529 } 9530 9531 /* Count number of newly disabled CRTCs for dropping PM refs later. */ 9532 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 9533 new_crtc_state, i) { 9534 if (old_crtc_state->active && !new_crtc_state->active) 9535 crtc_disable_count++; 9536 9537 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9538 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9539 9540 /* For freesync config update on crtc state and params for irq */ 9541 update_stream_irq_parameters(dm, dm_new_crtc_state); 9542 9543 /* Handle vrr on->off / off->on transitions */ 9544 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, 9545 dm_new_crtc_state); 9546 } 9547 9548 /** 9549 * Enable interrupts for CRTCs that are newly enabled or went through 9550 * a modeset. It was intentionally deferred until after the front end 9551 * state was modified to wait until the OTG was on and so the IRQ 9552 * handlers didn't access stale or invalid state. 
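 * When CONFIG_DEBUG_FS is set, the CRC capture source is also re-applied
 * here, since the frontend may have changed across the modeset.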
9553 */ 9554 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 9555 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 9556 #ifdef CONFIG_DEBUG_FS 9557 bool configure_crc = false; 9558 enum amdgpu_dm_pipe_crc_source cur_crc_src; 9559 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 9560 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk; 9561 #endif 9562 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9563 cur_crc_src = acrtc->dm_irq_params.crc_src; 9564 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9565 #endif 9566 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9567 9568 if (new_crtc_state->active && 9569 (!old_crtc_state->active || 9570 drm_atomic_crtc_needs_modeset(new_crtc_state))) { 9571 dc_stream_retain(dm_new_crtc_state->stream); 9572 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 9573 manage_dm_interrupts(adev, acrtc, true); 9574 9575 #ifdef CONFIG_DEBUG_FS 9576 /** 9577 * Frontend may have changed so reapply the CRC capture 9578 * settings for the stream. 9579 */ 9580 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9581 9582 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { 9583 configure_crc = true; 9584 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 9585 if (amdgpu_dm_crc_window_is_activated(crtc)) { 9586 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9587 acrtc->dm_irq_params.crc_window.update_win = true; 9588 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2; 9589 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); 9590 crc_rd_wrk->crtc = crtc; 9591 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); 9592 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9593 } 9594 #endif 9595 } 9596 9597 if (configure_crc) 9598 if (amdgpu_dm_crtc_configure_crc_source( 9599 crtc, dm_new_crtc_state, cur_crc_src)) 9600 DRM_DEBUG_DRIVER("Failed to configure crc source"); 9601 #endif 9602 } 9603 } 9604 9605 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 9606 if (new_crtc_state->async_flip) 9607 wait_for_vblank = false; 9608 9609 /* update planes when needed per crtc*/ 9610 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { 9611 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9612 9613 if (dm_new_crtc_state->stream) 9614 amdgpu_dm_commit_planes(state, dc_state, dev, 9615 dm, crtc, wait_for_vblank); 9616 } 9617 9618 /* Update audio instances for each connector. 
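 * amdgpu_dm_commit_audio() (defined above) sends removal notifications
 * first, then additions, for connectors affected by this commit.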
*/ 9619 amdgpu_dm_commit_audio(dev, state); 9620 9621 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ 9622 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 9623 /* restore the backlight level */ 9624 for (i = 0; i < dm->num_of_edps; i++) { 9625 if (dm->backlight_dev[i] && 9626 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i])) 9627 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 9628 } 9629 #endif 9630 /* 9631 * send vblank event on all events not handled in flip and 9632 * mark consumed event for drm_atomic_helper_commit_hw_done 9633 */ 9634 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 9635 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 9636 9637 if (new_crtc_state->event) 9638 drm_send_event_locked(dev, &new_crtc_state->event->base); 9639 9640 new_crtc_state->event = NULL; 9641 } 9642 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 9643 9644 /* Signal HW programming completion */ 9645 drm_atomic_helper_commit_hw_done(state); 9646 9647 if (wait_for_vblank) 9648 drm_atomic_helper_wait_for_flip_done(dev, state); 9649 9650 drm_atomic_helper_cleanup_planes(dev, state); 9651 9652 /* return the stolen vga memory back to VRAM */ 9653 if (!adev->mman.keep_stolen_vga_memory) 9654 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL); 9655 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); 9656 9657 /* 9658 * Finally, drop a runtime PM reference for each newly disabled CRTC, 9659 * so we can put the GPU into runtime suspend if we're not driving any 9660 * displays anymore 9661 */ 9662 for (i = 0; i < crtc_disable_count; i++) 9663 pm_runtime_put_autosuspend(dev->dev); 9664 pm_runtime_mark_last_busy(dev->dev); 9665 9666 if (dc_state_temp) 9667 dc_release_state(dc_state_temp); 9668 } 9669 9670 9671 static int dm_force_atomic_commit(struct drm_connector *connector) 9672 { 9673 int ret = 0; 9674 struct drm_device *ddev = connector->dev; 9675 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); 9676 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 9677 struct drm_plane *plane = disconnected_acrtc->base.primary; 9678 struct drm_connector_state *conn_state; 9679 struct drm_crtc_state *crtc_state; 9680 struct drm_plane_state *plane_state; 9681 9682 if (!state) 9683 return -ENOMEM; 9684 9685 state->acquire_ctx = ddev->mode_config.acquire_ctx; 9686 9687 /* Construct an atomic state to restore previous display setting */ 9688 9689 /* 9690 * Attach connectors to drm_atomic_state 9691 */ 9692 conn_state = drm_atomic_get_connector_state(state, connector); 9693 9694 ret = PTR_ERR_OR_ZERO(conn_state); 9695 if (ret) 9696 goto out; 9697 9698 /* Attach crtc to drm_atomic_state*/ 9699 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); 9700 9701 ret = PTR_ERR_OR_ZERO(crtc_state); 9702 if (ret) 9703 goto out; 9704 9705 /* force a restore */ 9706 crtc_state->mode_changed = true; 9707 9708 /* Attach plane to drm_atomic_state */ 9709 plane_state = drm_atomic_get_plane_state(state, plane); 9710 9711 ret = PTR_ERR_OR_ZERO(plane_state); 9712 if (ret) 9713 goto out; 9714 9715 /* Call commit internally with the state we just constructed */ 9716 ret = drm_atomic_commit(state); 9717 9718 out: 9719 drm_atomic_state_put(state); 9720 if (ret) 9721 DRM_ERROR("Restoring old state failed with %i\n", ret); 9722 9723 return ret; 9724 } 9725 9726 /* 9727 * This function handles all cases when set mode does not come upon hotplug. 
9728 * This includes when a display is unplugged then plugged back into the 9729 * same port and when running without usermode desktop manager support 9730 */ 9731 void dm_restore_drm_connector_state(struct drm_device *dev, 9732 struct drm_connector *connector) 9733 { 9734 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 9735 struct amdgpu_crtc *disconnected_acrtc; 9736 struct dm_crtc_state *acrtc_state; 9737 9738 if (!aconnector->dc_sink || !connector->state || !connector->encoder) 9739 return; 9740 9741 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 9742 if (!disconnected_acrtc) 9743 return; 9744 9745 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 9746 if (!acrtc_state->stream) 9747 return; 9748 9749 /* 9750 * If the previous sink is not released and different from the current, 9751 * we deduce we are in a state where we cannot rely on a usermode call 9752 * to turn on the display, so we do it here 9753 */ 9754 if (acrtc_state->stream->sink != aconnector->dc_sink) 9755 dm_force_atomic_commit(&aconnector->base); 9756 } 9757 9758 /* 9759 * Grabs all modesetting locks to serialize against any blocking commits and 9760 * waits for completion of all non-blocking commits. 9761 */ 9762 static int do_aquire_global_lock(struct drm_device *dev, 9763 struct drm_atomic_state *state) 9764 { 9765 struct drm_crtc *crtc; 9766 struct drm_crtc_commit *commit; 9767 long ret; 9768 9769 /* 9770 * Adding all modeset locks to acquire_ctx will 9771 * ensure that when the framework releases it, the 9772 * extra locks we are locking here will get released too 9773 */ 9774 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); 9775 if (ret) 9776 return ret; 9777 9778 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 9779 spin_lock(&crtc->commit_lock); 9780 commit = list_first_entry_or_null(&crtc->commit_list, 9781 struct drm_crtc_commit, commit_entry); 9782 if (commit) 9783 drm_crtc_commit_get(commit); 9784 spin_unlock(&crtc->commit_lock); 9785 9786 if (!commit) 9787 continue; 9788 9789 /* 9790 * Make sure all pending HW programming has completed and 9791 * all page flips are done 9792 */ 9793 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); 9794 9795 if (ret > 0) 9796 ret = wait_for_completion_interruptible_timeout( 9797 &commit->flip_done, 10*HZ); 9798 9799 if (ret == 0) 9800 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " 9801 "timed out\n", crtc->base.id, crtc->name); 9802 9803 drm_crtc_commit_put(commit); 9804 } 9805 9806 return ret < 0 ?
ret : 0; 9807 } 9808 9809 static void get_freesync_config_for_crtc( 9810 struct dm_crtc_state *new_crtc_state, 9811 struct dm_connector_state *new_con_state) 9812 { 9813 struct mod_freesync_config config = {0}; 9814 struct amdgpu_dm_connector *aconnector = 9815 to_amdgpu_dm_connector(new_con_state->base.connector); 9816 struct drm_display_mode *mode = &new_crtc_state->base.mode; 9817 int vrefresh = drm_mode_vrefresh(mode); 9818 bool fs_vid_mode = false; 9819 9820 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 9821 vrefresh >= aconnector->min_vfreq && 9822 vrefresh <= aconnector->max_vfreq; 9823 9824 if (new_crtc_state->vrr_supported) { 9825 new_crtc_state->stream->ignore_msa_timing_param = true; 9826 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; 9827 9828 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; 9829 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; 9830 config.vsif_supported = true; 9831 config.btr = true; 9832 9833 if (fs_vid_mode) { 9834 config.state = VRR_STATE_ACTIVE_FIXED; 9835 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; 9836 goto out; 9837 } else if (new_crtc_state->base.vrr_enabled) { 9838 config.state = VRR_STATE_ACTIVE_VARIABLE; 9839 } else { 9840 config.state = VRR_STATE_INACTIVE; 9841 } 9842 } 9843 out: 9844 new_crtc_state->freesync_config = config; 9845 } 9846 9847 static void reset_freesync_config_for_crtc( 9848 struct dm_crtc_state *new_crtc_state) 9849 { 9850 new_crtc_state->vrr_supported = false; 9851 9852 memset(&new_crtc_state->vrr_infopacket, 0, 9853 sizeof(new_crtc_state->vrr_infopacket)); 9854 } 9855 9856 static bool 9857 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 9858 struct drm_crtc_state *new_crtc_state) 9859 { 9860 struct drm_display_mode old_mode, new_mode; 9861 9862 if (!old_crtc_state || !new_crtc_state) 9863 return false; 9864 9865 old_mode = old_crtc_state->mode; 9866 new_mode = new_crtc_state->mode; 9867 9868 if (old_mode.clock == new_mode.clock && 9869 old_mode.hdisplay == new_mode.hdisplay && 9870 old_mode.vdisplay == new_mode.vdisplay && 9871 old_mode.htotal == new_mode.htotal && 9872 old_mode.vtotal != new_mode.vtotal && 9873 old_mode.hsync_start == new_mode.hsync_start && 9874 old_mode.vsync_start != new_mode.vsync_start && 9875 old_mode.hsync_end == new_mode.hsync_end && 9876 old_mode.vsync_end != new_mode.vsync_end && 9877 old_mode.hskew == new_mode.hskew && 9878 old_mode.vscan == new_mode.vscan && 9879 (old_mode.vsync_end - old_mode.vsync_start) == 9880 (new_mode.vsync_end - new_mode.vsync_start)) 9881 return true; 9882 9883 return false; 9884 } 9885 9886 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { 9887 uint64_t num, den, res; 9888 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; 9889 9890 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; 9891 9892 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; 9893 den = (unsigned long long)new_crtc_state->mode.htotal * 9894 (unsigned long long)new_crtc_state->mode.vtotal; 9895 9896 res = div_u64(num, den); 9897 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; 9898 } 9899 9900 static int dm_update_crtc_state(struct amdgpu_display_manager *dm, 9901 struct drm_atomic_state *state, 9902 struct drm_crtc *crtc, 9903 struct drm_crtc_state *old_crtc_state, 9904 struct drm_crtc_state *new_crtc_state, 9905 bool enable, 9906 bool *lock_and_validation_needed) 9907 { 
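	/*
	 * Called twice per CRTC from atomic check: first with enable == false to
	 * remove streams for disabled/reset CRTCs, then with enable == true to
	 * create and add streams for enabled ones.
	 */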
9908 struct dm_atomic_state *dm_state = NULL; 9909 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; 9910 struct dc_stream_state *new_stream; 9911 int ret = 0; 9912 9913 /* 9914 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set 9915 * update changed items 9916 */ 9917 struct amdgpu_crtc *acrtc = NULL; 9918 struct amdgpu_dm_connector *aconnector = NULL; 9919 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; 9920 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; 9921 9922 new_stream = NULL; 9923 9924 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 9925 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 9926 acrtc = to_amdgpu_crtc(crtc); 9927 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 9928 9929 /* TODO This hack should go away */ 9930 if (aconnector && enable) { 9931 /* Make sure fake sink is created in plug-in scenario */ 9932 drm_new_conn_state = drm_atomic_get_new_connector_state(state, 9933 &aconnector->base); 9934 drm_old_conn_state = drm_atomic_get_old_connector_state(state, 9935 &aconnector->base); 9936 9937 if (IS_ERR(drm_new_conn_state)) { 9938 ret = PTR_ERR_OR_ZERO(drm_new_conn_state); 9939 goto fail; 9940 } 9941 9942 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); 9943 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); 9944 9945 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 9946 goto skip_modeset; 9947 9948 new_stream = create_validate_stream_for_sink(aconnector, 9949 &new_crtc_state->mode, 9950 dm_new_conn_state, 9951 dm_old_crtc_state->stream); 9952 9953 /* 9954 * we can have no stream on ACTION_SET if a display 9955 * was disconnected during S3, in this case it is not an 9956 * error, the OS will be updated after detection, and 9957 * will do the right thing on next atomic commit 9958 */ 9959 9960 if (!new_stream) { 9961 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", 9962 __func__, acrtc->base.base.id); 9963 ret = -ENOMEM; 9964 goto fail; 9965 } 9966 9967 /* 9968 * TODO: Check VSDB bits to decide whether this should 9969 * be enabled or not. 9970 */ 9971 new_stream->triggered_crtc_reset.enabled = 9972 dm->force_timing_sync; 9973 9974 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 9975 9976 ret = fill_hdr_info_packet(drm_new_conn_state, 9977 &new_stream->hdr_static_metadata); 9978 if (ret) 9979 goto fail; 9980 9981 /* 9982 * If we already removed the old stream from the context 9983 * (and set the new stream to NULL) then we can't reuse 9984 * the old stream even if the stream and scaling are unchanged. 9985 * We'll hit the BUG_ON and black screen. 9986 * 9987 * TODO: Refactor this function to allow this check to work 9988 * in all conditions. 
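 * For now the early-out below is limited to the amdgpu_freesync_vid_mode
 * case, where only the front porch timing differs between the old and new
 * modes.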
9989 */ 9990 if (amdgpu_freesync_vid_mode && 9991 dm_new_crtc_state->stream && 9992 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) 9993 goto skip_modeset; 9994 9995 if (dm_new_crtc_state->stream && 9996 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 9997 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 9998 new_crtc_state->mode_changed = false; 9999 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", 10000 new_crtc_state->mode_changed); 10001 } 10002 } 10003 10004 /* mode_changed flag may get updated above, need to check again */ 10005 if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 10006 goto skip_modeset; 10007 10008 DRM_DEBUG_ATOMIC( 10009 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " 10010 "planes_changed:%d, mode_changed:%d,active_changed:%d," 10011 "connectors_changed:%d\n", 10012 acrtc->crtc_id, 10013 new_crtc_state->enable, 10014 new_crtc_state->active, 10015 new_crtc_state->planes_changed, 10016 new_crtc_state->mode_changed, 10017 new_crtc_state->active_changed, 10018 new_crtc_state->connectors_changed); 10019 10020 /* Remove stream for any changed/disabled CRTC */ 10021 if (!enable) { 10022 10023 if (!dm_old_crtc_state->stream) 10024 goto skip_modeset; 10025 10026 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && 10027 is_timing_unchanged_for_freesync(new_crtc_state, 10028 old_crtc_state)) { 10029 new_crtc_state->mode_changed = false; 10030 DRM_DEBUG_DRIVER( 10031 "Mode change not required for front porch change, " 10032 "setting mode_changed to %d", 10033 new_crtc_state->mode_changed); 10034 10035 set_freesync_fixed_config(dm_new_crtc_state); 10036 10037 goto skip_modeset; 10038 } else if (amdgpu_freesync_vid_mode && aconnector && 10039 is_freesync_video_mode(&new_crtc_state->mode, 10040 aconnector)) { 10041 struct drm_display_mode *high_mode; 10042 10043 high_mode = get_highest_refresh_rate_mode(aconnector, false); 10044 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) { 10045 set_freesync_fixed_config(dm_new_crtc_state); 10046 } 10047 } 10048 10049 ret = dm_atomic_get_state(state, &dm_state); 10050 if (ret) 10051 goto fail; 10052 10053 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", 10054 crtc->base.id); 10055 10056 /* i.e. 
reset mode */ 10057 if (dc_remove_stream_from_ctx( 10058 dm->dc, 10059 dm_state->context, 10060 dm_old_crtc_state->stream) != DC_OK) { 10061 ret = -EINVAL; 10062 goto fail; 10063 } 10064 10065 dc_stream_release(dm_old_crtc_state->stream); 10066 dm_new_crtc_state->stream = NULL; 10067 10068 reset_freesync_config_for_crtc(dm_new_crtc_state); 10069 10070 *lock_and_validation_needed = true; 10071 10072 } else {/* Add stream for any updated/enabled CRTC */ 10073 /* 10074 * Quick fix to prevent NULL pointer on new_stream when 10075 * added MST connectors not found in existing crtc_state in the chained mode 10076 * TODO: need to dig out the root cause of that 10077 */ 10078 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) 10079 goto skip_modeset; 10080 10081 if (modereset_required(new_crtc_state)) 10082 goto skip_modeset; 10083 10084 if (modeset_required(new_crtc_state, new_stream, 10085 dm_old_crtc_state->stream)) { 10086 10087 WARN_ON(dm_new_crtc_state->stream); 10088 10089 ret = dm_atomic_get_state(state, &dm_state); 10090 if (ret) 10091 goto fail; 10092 10093 dm_new_crtc_state->stream = new_stream; 10094 10095 dc_stream_retain(new_stream); 10096 10097 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 10098 crtc->base.id); 10099 10100 if (dc_add_stream_to_ctx( 10101 dm->dc, 10102 dm_state->context, 10103 dm_new_crtc_state->stream) != DC_OK) { 10104 ret = -EINVAL; 10105 goto fail; 10106 } 10107 10108 *lock_and_validation_needed = true; 10109 } 10110 } 10111 10112 skip_modeset: 10113 /* Release extra reference */ 10114 if (new_stream) 10115 dc_stream_release(new_stream); 10116 10117 /* 10118 * We want to do dc stream updates that do not require a 10119 * full modeset below. 10120 */ 10121 if (!(enable && aconnector && new_crtc_state->active)) 10122 return 0; 10123 /* 10124 * Given above conditions, the dc state cannot be NULL because: 10125 * 1. We're in the process of enabling CRTCs (just been added 10126 * to the dc context, or already is on the context) 10127 * 2. Has a valid connector attached, and 10128 * 3. Is currently active and enabled. 10129 * => The dc stream state currently exists. 10130 */ 10131 BUG_ON(dm_new_crtc_state->stream == NULL); 10132 10133 /* Scaling or underscan settings */ 10134 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) || 10135 drm_atomic_crtc_needs_modeset(new_crtc_state)) 10136 update_stream_scaling_settings( 10137 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); 10138 10139 /* ABM settings */ 10140 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; 10141 10142 /* 10143 * Color management settings. We also update color properties 10144 * when a modeset is needed, to ensure it gets reprogrammed. 10145 */ 10146 if (dm_new_crtc_state->base.color_mgmt_changed || 10147 drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10148 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state); 10149 if (ret) 10150 goto fail; 10151 } 10152 10153 /* Update Freesync settings. 
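 * get_freesync_config_for_crtc() derives the VRR range and state from the
 * connector's reported min/max refresh rates.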
*/ 10154 get_freesync_config_for_crtc(dm_new_crtc_state, 10155 dm_new_conn_state); 10156 10157 return ret; 10158 10159 fail: 10160 if (new_stream) 10161 dc_stream_release(new_stream); 10162 return ret; 10163 } 10164 10165 static bool should_reset_plane(struct drm_atomic_state *state, 10166 struct drm_plane *plane, 10167 struct drm_plane_state *old_plane_state, 10168 struct drm_plane_state *new_plane_state) 10169 { 10170 struct drm_plane *other; 10171 struct drm_plane_state *old_other_state, *new_other_state; 10172 struct drm_crtc_state *new_crtc_state; 10173 int i; 10174 10175 /* 10176 * TODO: Remove this hack once the checks below are sufficient 10177 * enough to determine when we need to reset all the planes on 10178 * the stream. 10179 */ 10180 if (state->allow_modeset) 10181 return true; 10182 10183 /* Exit early if we know that we're adding or removing the plane. */ 10184 if (old_plane_state->crtc != new_plane_state->crtc) 10185 return true; 10186 10187 /* old crtc == new_crtc == NULL, plane not in context. */ 10188 if (!new_plane_state->crtc) 10189 return false; 10190 10191 new_crtc_state = 10192 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); 10193 10194 if (!new_crtc_state) 10195 return true; 10196 10197 /* CRTC Degamma changes currently require us to recreate planes. */ 10198 if (new_crtc_state->color_mgmt_changed) 10199 return true; 10200 10201 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) 10202 return true; 10203 10204 /* 10205 * If there are any new primary or overlay planes being added or 10206 * removed then the z-order can potentially change. To ensure 10207 * correct z-order and pipe acquisition the current DC architecture 10208 * requires us to remove and recreate all existing planes. 10209 * 10210 * TODO: Come up with a more elegant solution for this. 10211 */ 10212 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { 10213 struct amdgpu_framebuffer *old_afb, *new_afb; 10214 if (other->type == DRM_PLANE_TYPE_CURSOR) 10215 continue; 10216 10217 if (old_other_state->crtc != new_plane_state->crtc && 10218 new_other_state->crtc != new_plane_state->crtc) 10219 continue; 10220 10221 if (old_other_state->crtc != new_other_state->crtc) 10222 return true; 10223 10224 /* Src/dst size and scaling updates. */ 10225 if (old_other_state->src_w != new_other_state->src_w || 10226 old_other_state->src_h != new_other_state->src_h || 10227 old_other_state->crtc_w != new_other_state->crtc_w || 10228 old_other_state->crtc_h != new_other_state->crtc_h) 10229 return true; 10230 10231 /* Rotation / mirroring updates. */ 10232 if (old_other_state->rotation != new_other_state->rotation) 10233 return true; 10234 10235 /* Blending updates. */ 10236 if (old_other_state->pixel_blend_mode != 10237 new_other_state->pixel_blend_mode) 10238 return true; 10239 10240 /* Alpha updates. */ 10241 if (old_other_state->alpha != new_other_state->alpha) 10242 return true; 10243 10244 /* Colorspace changes. */ 10245 if (old_other_state->color_range != new_other_state->color_range || 10246 old_other_state->color_encoding != new_other_state->color_encoding) 10247 return true; 10248 10249 /* Framebuffer checks fall at the end. */ 10250 if (!old_other_state->fb || !new_other_state->fb) 10251 continue; 10252 10253 /* Pixel format changes can require bandwidth updates. 
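 * A framebuffer format mismatch therefore forces a plane reset here.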
*/ 10254 if (old_other_state->fb->format != new_other_state->fb->format) 10255 return true; 10256 10257 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; 10258 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; 10259 10260 /* Tiling and DCC changes also require bandwidth updates. */ 10261 if (old_afb->tiling_flags != new_afb->tiling_flags || 10262 old_afb->base.modifier != new_afb->base.modifier) 10263 return true; 10264 } 10265 10266 return false; 10267 } 10268 10269 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, 10270 struct drm_plane_state *new_plane_state, 10271 struct drm_framebuffer *fb) 10272 { 10273 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); 10274 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); 10275 unsigned int pitch; 10276 bool linear; 10277 10278 if (fb->width > new_acrtc->max_cursor_width || 10279 fb->height > new_acrtc->max_cursor_height) { 10280 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", 10281 new_plane_state->fb->width, 10282 new_plane_state->fb->height); 10283 return -EINVAL; 10284 } 10285 if (new_plane_state->src_w != fb->width << 16 || 10286 new_plane_state->src_h != fb->height << 16) { 10287 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10288 return -EINVAL; 10289 } 10290 10291 /* Pitch in pixels */ 10292 pitch = fb->pitches[0] / fb->format->cpp[0]; 10293 10294 if (fb->width != pitch) { 10295 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", 10296 fb->width, pitch); 10297 return -EINVAL; 10298 } 10299 10300 switch (pitch) { 10301 case 64: 10302 case 128: 10303 case 256: 10304 /* FB pitch is supported by cursor plane */ 10305 break; 10306 default: 10307 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); 10308 return -EINVAL; 10309 } 10310 10311 /* Core DRM takes care of checking FB modifiers, so we only need to 10312 * check tiling flags when the FB doesn't have a modifier. 
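 * Families older than AMDGPU_FAMILY_AI are checked via the legacy
 * ARRAY_MODE/MICRO_TILE_MODE tiling bits; newer families via SWIZZLE_MODE.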
*/ 10313 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { 10314 if (adev->family < AMDGPU_FAMILY_AI) { 10315 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && 10316 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && 10317 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; 10318 } else { 10319 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; 10320 } 10321 if (!linear) { 10322 DRM_DEBUG_ATOMIC("Cursor FB not linear"); 10323 return -EINVAL; 10324 } 10325 } 10326 10327 return 0; 10328 } 10329 10330 static int dm_update_plane_state(struct dc *dc, 10331 struct drm_atomic_state *state, 10332 struct drm_plane *plane, 10333 struct drm_plane_state *old_plane_state, 10334 struct drm_plane_state *new_plane_state, 10335 bool enable, 10336 bool *lock_and_validation_needed) 10337 { 10338 10339 struct dm_atomic_state *dm_state = NULL; 10340 struct drm_crtc *new_plane_crtc, *old_plane_crtc; 10341 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10342 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; 10343 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; 10344 struct amdgpu_crtc *new_acrtc; 10345 bool needs_reset; 10346 int ret = 0; 10347 10348 10349 new_plane_crtc = new_plane_state->crtc; 10350 old_plane_crtc = old_plane_state->crtc; 10351 dm_new_plane_state = to_dm_plane_state(new_plane_state); 10352 dm_old_plane_state = to_dm_plane_state(old_plane_state); 10353 10354 if (plane->type == DRM_PLANE_TYPE_CURSOR) { 10355 if (!enable || !new_plane_crtc || 10356 drm_atomic_plane_disabling(plane->state, new_plane_state)) 10357 return 0; 10358 10359 new_acrtc = to_amdgpu_crtc(new_plane_crtc); 10360 10361 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { 10362 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); 10363 return -EINVAL; 10364 } 10365 10366 if (new_plane_state->fb) { 10367 ret = dm_check_cursor_fb(new_acrtc, new_plane_state, 10368 new_plane_state->fb); 10369 if (ret) 10370 return ret; 10371 } 10372 10373 return 0; 10374 } 10375 10376 needs_reset = should_reset_plane(state, plane, old_plane_state, 10377 new_plane_state); 10378 10379 /* Remove any changed/removed planes */ 10380 if (!enable) { 10381 if (!needs_reset) 10382 return 0; 10383 10384 if (!old_plane_crtc) 10385 return 0; 10386 10387 old_crtc_state = drm_atomic_get_old_crtc_state( 10388 state, old_plane_crtc); 10389 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10390 10391 if (!dm_old_crtc_state->stream) 10392 return 0; 10393 10394 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 10395 plane->base.id, old_plane_crtc->base.id); 10396 10397 ret = dm_atomic_get_state(state, &dm_state); 10398 if (ret) 10399 return ret; 10400 10401 if (!dc_remove_plane_from_context( 10402 dc, 10403 dm_old_crtc_state->stream, 10404 dm_old_plane_state->dc_state, 10405 dm_state->context)) { 10406 10407 return -EINVAL; 10408 } 10409 10410 10411 dc_plane_state_release(dm_old_plane_state->dc_state); 10412 dm_new_plane_state->dc_state = NULL; 10413 10414 *lock_and_validation_needed = true; 10415 10416 } else { /* Add new planes */ 10417 struct dc_plane_state *dc_new_plane_state; 10418 10419 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 10420 return 0; 10421 10422 if (!new_plane_crtc) 10423 return 0; 10424 10425 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); 10426 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 10427 10428 if (!dm_new_crtc_state->stream) 10429 
return 0; 10430 10431 if (!needs_reset) 10432 return 0; 10433 10434 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state); 10435 if (ret) 10436 return ret; 10437 10438 WARN_ON(dm_new_plane_state->dc_state); 10439 10440 dc_new_plane_state = dc_create_plane_state(dc); 10441 if (!dc_new_plane_state) 10442 return -ENOMEM; 10443 10444 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", 10445 plane->base.id, new_plane_crtc->base.id); 10446 10447 ret = fill_dc_plane_attributes( 10448 drm_to_adev(new_plane_crtc->dev), 10449 dc_new_plane_state, 10450 new_plane_state, 10451 new_crtc_state); 10452 if (ret) { 10453 dc_plane_state_release(dc_new_plane_state); 10454 return ret; 10455 } 10456 10457 ret = dm_atomic_get_state(state, &dm_state); 10458 if (ret) { 10459 dc_plane_state_release(dc_new_plane_state); 10460 return ret; 10461 } 10462 10463 /* 10464 * Any atomic check errors that occur after this will 10465 * not need a release. The plane state will be attached 10466 * to the stream, and therefore part of the atomic 10467 * state. It'll be released when the atomic state is 10468 * cleaned. 10469 */ 10470 if (!dc_add_plane_to_context( 10471 dc, 10472 dm_new_crtc_state->stream, 10473 dc_new_plane_state, 10474 dm_state->context)) { 10475 10476 dc_plane_state_release(dc_new_plane_state); 10477 return -EINVAL; 10478 } 10479 10480 dm_new_plane_state->dc_state = dc_new_plane_state; 10481 10482 /* Tell DC to do a full surface update every time there 10483 * is a plane change. Inefficient, but works for now. 10484 */ 10485 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; 10486 10487 *lock_and_validation_needed = true; 10488 } 10489 10490 10491 return ret; 10492 } 10493 10494 static int dm_check_crtc_cursor(struct drm_atomic_state *state, 10495 struct drm_crtc *crtc, 10496 struct drm_crtc_state *new_crtc_state) 10497 { 10498 struct drm_plane_state *new_cursor_state, *new_primary_state; 10499 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h; 10500 10501 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a 10502 * cursor per pipe but it's going to inherit the scaling and 10503 * positioning from the underlying pipe. Check the cursor plane's 10504 * blending properties match the primary plane's. 
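 * Only the effective scaling factors are compared below; a mismatch would
 * make the hardware cursor render at a size userspace did not ask for.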
*/ 10505 10506 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor); 10507 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary); 10508 if (!new_cursor_state || !new_primary_state || 10509 !new_cursor_state->fb || !new_primary_state->fb) { 10510 return 0; 10511 } 10512 10513 cursor_scale_w = new_cursor_state->crtc_w * 1000 / 10514 (new_cursor_state->src_w >> 16); 10515 cursor_scale_h = new_cursor_state->crtc_h * 1000 / 10516 (new_cursor_state->src_h >> 16); 10517 10518 primary_scale_w = new_primary_state->crtc_w * 1000 / 10519 (new_primary_state->src_w >> 16); 10520 primary_scale_h = new_primary_state->crtc_h * 1000 / 10521 (new_primary_state->src_h >> 16); 10522 10523 if (cursor_scale_w != primary_scale_w || 10524 cursor_scale_h != primary_scale_h) { 10525 drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n"); 10526 return -EINVAL; 10527 } 10528 10529 return 0; 10530 } 10531 10532 #if defined(CONFIG_DRM_AMD_DC_DCN) 10533 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) 10534 { 10535 struct drm_connector *connector; 10536 struct drm_connector_state *conn_state; 10537 struct amdgpu_dm_connector *aconnector = NULL; 10538 int i; 10539 for_each_new_connector_in_state(state, connector, conn_state, i) { 10540 if (conn_state->crtc != crtc) 10541 continue; 10542 10543 aconnector = to_amdgpu_dm_connector(connector); 10544 if (!aconnector->port || !aconnector->mst_port) 10545 aconnector = NULL; 10546 else 10547 break; 10548 } 10549 10550 if (!aconnector) 10551 return 0; 10552 10553 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr); 10554 } 10555 #endif 10556 10557 static int validate_overlay(struct drm_atomic_state *state) 10558 { 10559 int i; 10560 struct drm_plane *plane; 10561 struct drm_plane_state *new_plane_state; 10562 struct drm_plane_state *primary_state, *overlay_state = NULL; 10563 10564 /* Check if primary plane is contained inside overlay */ 10565 for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) { 10566 if (plane->type == DRM_PLANE_TYPE_OVERLAY) { 10567 if (drm_atomic_plane_disabling(plane->state, new_plane_state)) 10568 return 0; 10569 10570 overlay_state = new_plane_state; 10571 continue; 10572 } 10573 } 10574 10575 /* check if we're making changes to the overlay plane */ 10576 if (!overlay_state) 10577 return 0; 10578 10579 /* check if overlay plane is enabled */ 10580 if (!overlay_state->crtc) 10581 return 0; 10582 10583 /* find the primary plane for the CRTC that the overlay is enabled on */ 10584 primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary); 10585 if (IS_ERR(primary_state)) 10586 return PTR_ERR(primary_state); 10587 10588 /* check if primary plane is enabled */ 10589 if (!primary_state->crtc) 10590 return 0; 10591 10592 /* Perform the bounds check to ensure the overlay plane covers the primary */ 10593 if (primary_state->crtc_x < overlay_state->crtc_x || 10594 primary_state->crtc_y < overlay_state->crtc_y || 10595 primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w || 10596 primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) { 10597 DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n"); 10598 return -EINVAL; 10599 } 10600 10601 return 0; 10602 } 10603 10604 /** 10605 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. 
10606 * @dev: The DRM device 10607 * @state: The atomic state to commit 10608 * 10609 * Validate that the given atomic state is programmable by DC into hardware. 10610 * This involves constructing a &struct dc_state reflecting the new hardware 10611 * state we wish to commit, then querying DC to see if it is programmable. It's 10612 * important not to modify the existing DC state. Otherwise, atomic_check 10613 * may unexpectedly commit hardware changes. 10614 * 10615 * When validating the DC state, it's important that the right locks are 10616 * acquired. For full updates case which removes/adds/updates streams on one 10617 * CRTC while flipping on another CRTC, acquiring global lock will guarantee 10618 * that any such full update commit will wait for completion of any outstanding 10619 * flip using DRMs synchronization events. 10620 * 10621 * Note that DM adds the affected connectors for all CRTCs in state, when that 10622 * might not seem necessary. This is because DC stream creation requires the 10623 * DC sink, which is tied to the DRM connector state. Cleaning this up should 10624 * be possible but non-trivial - a possible TODO item. 10625 * 10626 * Return: -Error code if validation failed. 10627 */ 10628 static int amdgpu_dm_atomic_check(struct drm_device *dev, 10629 struct drm_atomic_state *state) 10630 { 10631 struct amdgpu_device *adev = drm_to_adev(dev); 10632 struct dm_atomic_state *dm_state = NULL; 10633 struct dc *dc = adev->dm.dc; 10634 struct drm_connector *connector; 10635 struct drm_connector_state *old_con_state, *new_con_state; 10636 struct drm_crtc *crtc; 10637 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 10638 struct drm_plane *plane; 10639 struct drm_plane_state *old_plane_state, *new_plane_state; 10640 enum dc_status status; 10641 int ret, i; 10642 bool lock_and_validation_needed = false; 10643 struct dm_crtc_state *dm_old_crtc_state; 10644 #if defined(CONFIG_DRM_AMD_DC_DCN) 10645 struct dsc_mst_fairness_vars vars[MAX_PIPES]; 10646 #endif 10647 10648 trace_amdgpu_dm_atomic_check_begin(state); 10649 10650 ret = drm_atomic_helper_check_modeset(dev, state); 10651 if (ret) 10652 goto fail; 10653 10654 /* Check connector changes */ 10655 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { 10656 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); 10657 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); 10658 10659 /* Skip connectors that are disabled or part of modeset already. 
*/ 10660 if (!old_con_state->crtc && !new_con_state->crtc) 10661 continue; 10662 10663 if (!new_con_state->crtc) 10664 continue; 10665 10666 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); 10667 if (IS_ERR(new_crtc_state)) { 10668 ret = PTR_ERR(new_crtc_state); 10669 goto fail; 10670 } 10671 10672 if (dm_old_con_state->abm_level != 10673 dm_new_con_state->abm_level) 10674 new_crtc_state->connectors_changed = true; 10675 } 10676 10677 #if defined(CONFIG_DRM_AMD_DC_DCN) 10678 if (dc_resource_is_dsc_encoding_supported(dc)) { 10679 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10680 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 10681 ret = add_affected_mst_dsc_crtcs(state, crtc); 10682 if (ret) 10683 goto fail; 10684 } 10685 } 10686 } 10687 #endif 10688 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10689 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 10690 10691 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 10692 !new_crtc_state->color_mgmt_changed && 10693 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && 10694 dm_old_crtc_state->dsc_force_changed == false) 10695 continue; 10696 10697 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); 10698 if (ret) 10699 goto fail; 10700 10701 if (!new_crtc_state->enable) 10702 continue; 10703 10704 ret = drm_atomic_add_affected_connectors(state, crtc); 10705 if (ret) 10706 return ret; 10707 10708 ret = drm_atomic_add_affected_planes(state, crtc); 10709 if (ret) 10710 goto fail; 10711 10712 if (dm_old_crtc_state->dsc_force_changed) 10713 new_crtc_state->mode_changed = true; 10714 } 10715 10716 /* 10717 * Add all primary and overlay planes on the CRTC to the state 10718 * whenever a plane is enabled to maintain correct z-ordering 10719 * and to enable fast surface updates. 
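 * Cursor planes are skipped here, as they are not handled as DC planes.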
10720 */ 10721 drm_for_each_crtc(crtc, dev) { 10722 bool modified = false; 10723 10724 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 10725 if (plane->type == DRM_PLANE_TYPE_CURSOR) 10726 continue; 10727 10728 if (new_plane_state->crtc == crtc || 10729 old_plane_state->crtc == crtc) { 10730 modified = true; 10731 break; 10732 } 10733 } 10734 10735 if (!modified) 10736 continue; 10737 10738 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 10739 if (plane->type == DRM_PLANE_TYPE_CURSOR) 10740 continue; 10741 10742 new_plane_state = 10743 drm_atomic_get_plane_state(state, plane); 10744 10745 if (IS_ERR(new_plane_state)) { 10746 ret = PTR_ERR(new_plane_state); 10747 goto fail; 10748 } 10749 } 10750 } 10751 10752 /* Remove exiting planes if they are modified */ 10753 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 10754 ret = dm_update_plane_state(dc, state, plane, 10755 old_plane_state, 10756 new_plane_state, 10757 false, 10758 &lock_and_validation_needed); 10759 if (ret) 10760 goto fail; 10761 } 10762 10763 /* Disable all crtcs which require disable */ 10764 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10765 ret = dm_update_crtc_state(&adev->dm, state, crtc, 10766 old_crtc_state, 10767 new_crtc_state, 10768 false, 10769 &lock_and_validation_needed); 10770 if (ret) 10771 goto fail; 10772 } 10773 10774 /* Enable all crtcs which require enable */ 10775 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 10776 ret = dm_update_crtc_state(&adev->dm, state, crtc, 10777 old_crtc_state, 10778 new_crtc_state, 10779 true, 10780 &lock_and_validation_needed); 10781 if (ret) 10782 goto fail; 10783 } 10784 10785 ret = validate_overlay(state); 10786 if (ret) 10787 goto fail; 10788 10789 /* Add new/modified planes */ 10790 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { 10791 ret = dm_update_plane_state(dc, state, plane, 10792 old_plane_state, 10793 new_plane_state, 10794 true, 10795 &lock_and_validation_needed); 10796 if (ret) 10797 goto fail; 10798 } 10799 10800 /* Run this here since we want to validate the streams we created */ 10801 ret = drm_atomic_helper_check_planes(dev, state); 10802 if (ret) 10803 goto fail; 10804 10805 /* Check cursor planes scaling */ 10806 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10807 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); 10808 if (ret) 10809 goto fail; 10810 } 10811 10812 if (state->legacy_cursor_update) { 10813 /* 10814 * This is a fast cursor update coming from the plane update 10815 * helper, check if it can be done asynchronously for better 10816 * performance. 10817 */ 10818 state->async_update = 10819 !drm_atomic_helper_async_check(dev, state); 10820 10821 /* 10822 * Skip the remaining global validation if this is an async 10823 * update. Cursor updates can be done without affecting 10824 * state or bandwidth calcs and this avoids the performance 10825 * penalty of locking the private state object and 10826 * allocating a new dc_state. 10827 */ 10828 if (state->async_update) 10829 return 0; 10830 } 10831 10832 /* Check scaling and underscan changes*/ 10833 /* TODO Removed scaling changes validation due to inability to commit 10834 * new stream into context w\o causing full reset. Need to 10835 * decide how to handle. 
	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);

		/* Skip any modesets/resets */
		if (!acrtc || drm_atomic_crtc_needs_modeset(
				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
			continue;

		/* Skip anything that is not a scaling or underscan change */
		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
			continue;

		lock_and_validation_needed = true;
	}

	/*
	 * Streams and planes are reset when there are changes that affect
	 * bandwidth. Anything that affects bandwidth needs to go through
	 * DC global validation to ensure that the configuration can be applied
	 * to hardware.
	 *
	 * We have to currently stall out here in atomic_check for outstanding
	 * commits to finish in this case because our IRQ handlers reference
	 * DRM state directly - we can end up disabling interrupts too early
	 * if we don't.
	 *
	 * TODO: Remove this stall and drop DM state private objects.
	 */
	if (lock_and_validation_needed) {
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		ret = do_aquire_global_lock(dev, state);
		if (ret)
			goto fail;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
			ret = -EINVAL;
			goto fail;
		}

		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
		if (ret)
			goto fail;
#endif

		/*
		 * Perform validation of MST topology in the state:
		 * We need to perform MST atomic check before calling
		 * dc_validate_global_state(), or there is a chance
		 * to get stuck in an infinite loop and hang eventually.
		 */
		ret = drm_dp_mst_atomic_check(state);
		if (ret)
			goto fail;

		status = dc_validate_global_state(dc, dm_state->context, false);
		if (status != DC_OK) {
			drm_dbg_atomic(dev,
				       "DC global validation failure: %s (%d)\n",
				       dc_status_to_str(status), status);
			ret = -EINVAL;
			goto fail;
		}
	} else {
		/*
		 * The commit is a fast update. Fast updates shouldn't change
		 * the DC context, shouldn't affect global validation, and can
		 * have their commit work done in parallel with other commits
		 * not touching the same resource. If we have a new DC context
		 * as part of the DM atomic state from validation we need to
		 * free it and retain the existing one instead.
		 *
		 * Furthermore, since the DM atomic state only contains the DC
		 * context and can safely be annulled, we can free the state
		 * and clear the associated private object now to free
		 * some memory and avoid a possible use-after-free later.
		 */

		for (i = 0; i < state->num_private_objs; i++) {
			struct drm_private_obj *obj = state->private_objs[i].ptr;

			if (obj->funcs == adev->dm.atomic_obj.funcs) {
				int j = state->num_private_objs - 1;

				dm_atomic_destroy_state(obj,
						state->private_objs[i].state);

				/* If i is not at the end of the array then the
				 * last element needs to be moved to where i was
				 * before the array can safely be truncated.
				 */
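				/*
				 * Worked example (hypothetical numbers): with
				 * num_private_objs == 3 and a match at i == 0,
				 * private_objs[2] is copied into slot 0, slot 2
				 * is cleared, and num_private_objs drops to 2.
				 */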
				if (i != j)
					state->private_objs[i] =
						state->private_objs[j];

				state->private_objs[j].ptr = NULL;
				state->private_objs[j].state = NULL;
				state->private_objs[j].old_state = NULL;
				state->private_objs[j].new_state = NULL;

				state->num_private_objs = j;
				break;
			}
		}
	}

	/* Store the overall update type for use later in atomic check. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct dm_crtc_state *dm_new_crtc_state =
			to_dm_crtc_state(new_crtc_state);

		dm_new_crtc_state->update_type = lock_and_validation_needed ?
						 UPDATE_TYPE_FULL :
						 UPDATE_TYPE_FAST;
	}

	/* Must be success */
	WARN_ON(ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;

fail:
	if (ret == -EDEADLK)
		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
	else
		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);

	trace_amdgpu_dm_atomic_check_finish(state, ret);

	return ret;
}

static bool is_dp_capable_without_timing_msa(struct dc *dc,
					     struct amdgpu_dm_connector *amdgpu_dm_connector)
{
	uint8_t dpcd_data;
	bool capable = false;

	if (amdgpu_dm_connector->dc_link &&
	    dm_helpers_dp_read_dpcd(
			NULL,
			amdgpu_dm_connector->dc_link,
			DP_DOWN_STREAM_PORT_COUNT,
			&dpcd_data,
			sizeof(dpcd_data))) {
		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
	}

	return capable;
}

static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
		unsigned int offset,
		unsigned int total_length,
		uint8_t *data,
		unsigned int length,
		struct amdgpu_hdmi_vsdb_info *vsdb)
{
	bool res;
	union dmub_rb_cmd cmd;
	struct dmub_cmd_send_edid_cea *input;
	struct dmub_cmd_edid_cea_output *output;

	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
		return false;

	memset(&cmd, 0, sizeof(cmd));

	input = &cmd.edid_cea.data.input;

	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
	cmd.edid_cea.header.sub_type = 0;
	cmd.edid_cea.header.payload_bytes =
		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
	input->offset = offset;
	input->length = length;
	input->total_length = total_length;
	memcpy(input->payload, data, length);

	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
	if (!res) {
		DRM_ERROR("EDID CEA parser failed\n");
		return false;
	}

	output = &cmd.edid_cea.data.output;

	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
		if (!output->ack.success) {
			DRM_ERROR("EDID CEA ack failed at offset %d\n",
				  output->ack.offset);
		}
	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
		if (!output->amd_vsdb.vsdb_found)
			return false;

		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
	} else {
		DRM_WARN("Unknown EDID CEA parser results\n");
		return false;
	}

	return true;
}

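/*
 * Descriptive note: the DMCU-based parser below streams the CEA extension
 * block to firmware in 8-byte chunks; every chunk except the last is
 * acknowledged via dc_edid_parser_recv_cea_ack(), and only after the final
 * chunk does the firmware report whether an AMD VSDB with FreeSync data was
 * found.
 */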
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMCU for parsing */
	for (i = 0; i < len; i += 8) {
		bool res;
		int offset;

		/* send 8 bytes at a time */
		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
			return false;

		if (i + 8 == len) {
			/* EDID block sent completed, expect result */
			int version, min_rate, max_rate;

			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
			if (res) {
				/* amd vsdb found */
				vsdb_info->freesync_supported = 1;
				vsdb_info->amd_vsdb_version = version;
				vsdb_info->min_refresh_rate_hz = min_rate;
				vsdb_info->max_refresh_rate_hz = max_rate;
				return true;
			}
			/* not amd vsdb */
			return false;
		}

		/* check for ack */
		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
		if (!res)
			return false;
	}

	return false;
}

static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	int i;

	/* send extension block to DMUB for parsing */
	for (i = 0; i < len; i += 8) {
		/* send 8 bytes at a time */
		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
			return false;
	}

	return vsdb_info->freesync_supported;
}

static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
		uint8_t *edid_ext, int len,
		struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);

	if (adev->dm.dmub_srv)
		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
	else
		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
}

static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
	uint8_t *edid_ext = NULL;
	int i;
	bool valid_vsdb_found = false;

	/*----- drm_find_cea_extension() -----*/
	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
		return -ENODEV;

	/* Find CEA extension */
	for (i = 0; i < edid->extensions; i++) {
		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
		if (edid_ext[0] == CEA_EXT)
			break;
	}

	if (i == edid->extensions)
		return -ENODEV;

	/*----- cea_db_offsets() -----*/
	if (edid_ext[0] != CEA_EXT)
		return -ENODEV;

	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);

	return valid_vsdb_found ? i : -ENODEV;
}

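/*
 * Descriptive note: parse_hdmi_amd_vsdb() returns the index of the CEA
 * extension block that was parsed when an AMD VSDB with FreeSync information
 * is found, or -ENODEV when there is no EDID, no CEA extension, or no usable
 * VSDB.
 */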
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
				    struct edid *edid)
{
	int i = 0;
	struct detailed_timing *timing;
	struct detailed_non_pixel *data;
	struct detailed_data_monitor_range *range;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = NULL;
	struct dc_sink *sink;

	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	bool freesync_capable = false;
	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};

	if (!connector->state) {
		DRM_ERROR("%s - Connector has no state", __func__);
		goto update;
	}

	sink = amdgpu_dm_connector->dc_sink ?
		amdgpu_dm_connector->dc_sink :
		amdgpu_dm_connector->dc_em_sink;

	if (!edid || !sink) {
		dm_con_state = to_dm_connector_state(connector->state);

		amdgpu_dm_connector->min_vfreq = 0;
		amdgpu_dm_connector->max_vfreq = 0;
		amdgpu_dm_connector->pixel_clock_mhz = 0;
		connector->display_info.monitor_range.min_vfreq = 0;
		connector->display_info.monitor_range.max_vfreq = 0;
		freesync_capable = false;

		goto update;
	}

	dm_con_state = to_dm_connector_state(connector->state);

	if (!adev->dm.freesync_module)
		goto update;

	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
			|| sink->sink_signal == SIGNAL_TYPE_EDP) {
		bool edid_check_required = false;

		if (edid) {
			edid_check_required = is_dp_capable_without_timing_msa(
						adev->dm.dc,
						amdgpu_dm_connector);
		}

		if (edid_check_required == true && (edid->version > 1 ||
				(edid->version == 1 && edid->revision > 1))) {
			for (i = 0; i < 4; i++) {

				timing = &edid->detailed_timings[i];
				data = &timing->data.other_data;
				range = &data->data.range;
				/*
				 * Check if monitor has continuous frequency mode
				 */
				if (data->type != EDID_DETAIL_MONITOR_RANGE)
					continue;
				/*
				 * Check for flag range limits only. If flag == 1 then
				 * no additional timing information is provided.
				 * Default GTF, GTF Secondary curve and CVT are not
				 * supported.
				 */
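				/*
				 * (Per the EDID display range descriptor, a
				 * flags value of 1 selects the "range limits
				 * only" variant; the other variants carry
				 * GTF/CVT timing data that this path ignores.)
				 */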
				if (range->flags != 1)
					continue;

				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
				amdgpu_dm_connector->pixel_clock_mhz =
					range->pixel_clock_mhz * 10;

				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;

				break;
			}

			if (amdgpu_dm_connector->max_vfreq -
			    amdgpu_dm_connector->min_vfreq > 10) {

				freesync_capable = true;
			}
		}
	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
		if (i >= 0 && vsdb_info.freesync_supported) {
			timing = &edid->detailed_timings[i];
			data = &timing->data.other_data;

			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
				freesync_capable = true;

			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
		}
	}

update:
	if (dm_con_state)
		dm_con_state->freesync_capable = freesync_capable;

	if (connector->vrr_capable_property)
		drm_connector_set_vrr_capable_property(connector,
						       freesync_capable);
}

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dc *dc = adev->dm.dc;
	int i;

	mutex_lock(&adev->dm.dc_lock);
	if (dc->current_state) {
		for (i = 0; i < dc->current_state->stream_count; ++i)
			dc->current_state->streams[i]
				->triggered_crtc_reset.enabled =
				adev->dm.force_timing_sync;

		dm_enable_per_frame_crtc_master_sync(dc->current_state);
		dc_trigger_sync(dc, dc->current_state);
	}
	mutex_unlock(&adev->dm.dc_lock);
}

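/*
 * Descriptive note: the register helpers below are the DM-side MMIO accessors
 * used by DC. Each access goes through CGS and is recorded with the
 * amdgpu_dc_wreg/amdgpu_dc_rreg tracepoints; DM_CHECK_ADDR_0 optionally guards
 * against stray accesses to register offset 0.
 */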
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}

uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}

int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
				struct aux_payload *payload, enum aux_return_code_type *operation_result)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
	if (ret == 0) {
		*operation_result = AUX_RET_ERROR_TIMEOUT;
		return -1;
	}
	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;

	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;

		/* For read case, copy data to payload */
		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
		    (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
			       adev->dm.dmub_notify->aux_reply.length);
	}

	return adev->dm.dmub_notify->aux_reply.length;
}