/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "dc.h"
#include "amdgpu_securedisplay.h"

static const char *const pipe_crc_sources[] = {
        "none",
        "crtc",
        "crtc dither",
        "dprx",
        "dprx dither",
        "auto",
};

static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
{
        if (!source || !strcmp(source, "none"))
                return AMDGPU_DM_PIPE_CRC_SOURCE_NONE;
        if (!strcmp(source, "auto") || !strcmp(source, "crtc"))
                return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC;
        if (!strcmp(source, "dprx"))
                return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX;
        if (!strcmp(source, "crtc dither"))
                return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER;
        if (!strcmp(source, "dprx dither"))
                return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER;

        return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}

static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src)
{
        return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) ||
               (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER);
}

static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src)
{
        return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) ||
               (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER);
}

static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src)
{
        return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) ||
               (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) ||
               (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE);
}

const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
                                                  size_t *count)
{
        *count = ARRAY_SIZE(pipe_crc_sources);
        return pipe_crc_sources;
}

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
{
        struct drm_device *drm_dev = crtc->dev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

        spin_lock_irq(&drm_dev->event_lock);
        acrtc->dm_irq_params.crc_window.x_start = 0;
        acrtc->dm_irq_params.crc_window.y_start = 0;
        acrtc->dm_irq_params.crc_window.x_end = 0;
        acrtc->dm_irq_params.crc_window.y_end = 0;
        acrtc->dm_irq_params.crc_window.activated = false;
        acrtc->dm_irq_params.crc_window.update_win = false;
        acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
        spin_unlock_irq(&drm_dev->event_lock);
}
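/*
 * Worker callback: asks the secure display TA, via PSP, to send back the
 * ROI CRC for the PHY instance that the CRC window IRQ handler below
 * latched into crc_rd_work.
 */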
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
        struct crc_rd_work *crc_rd_wrk;
        struct amdgpu_device *adev;
        struct psp_context *psp;
        struct securedisplay_cmd *securedisplay_cmd;
        struct drm_crtc *crtc;
        uint8_t phy_id;
        int ret;

        crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work);
        spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
        crtc = crc_rd_wrk->crtc;

        if (!crtc) {
                spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
                return;
        }

        adev = drm_to_adev(crtc->dev);
        psp = &adev->psp;
        phy_id = crc_rd_wrk->phy_inst;
        spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);

        psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
                                       TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
        securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;
        ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
        if (!ret) {
                if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
                        psp_securedisplay_parse_resp_status(psp,
                                                            securedisplay_cmd->status);
        }
}

bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
        struct drm_device *drm_dev = crtc->dev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        bool ret = false;

        spin_lock_irq(&drm_dev->event_lock);
        ret = acrtc->dm_irq_params.crc_window.activated;
        spin_unlock_irq(&drm_dev->event_lock);

        return ret;
}
#endif

int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
                                 size_t *values_cnt)
{
        enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);

        if (source < 0) {
                DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
                                 src_name, crtc->index);
                return -EINVAL;
        }

        *values_cnt = 3;
        return 0;
}

int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
                                        struct dm_crtc_state *dm_crtc_state,
                                        enum amdgpu_dm_pipe_crc_source source)
{
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dc_stream_state *stream_state = dm_crtc_state->stream;
        bool enable = amdgpu_dm_is_valid_crc_source(source);
        int ret = 0;

        /* Configuration will be deferred to stream enable. */
        if (!stream_state)
                return -EINVAL;

        mutex_lock(&adev->dm.dc_lock);
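        /*
         * Note on the dc_stream_configure_crc() call below: the assumed
         * contract is (dc, stream, window, enable, continuous), where a NULL
         * window selects the whole frame and passing 'enable' twice keeps
         * capture running continuously for as long as the source is enabled.
         */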
        /* Enable or disable CRTC CRC generation, if necessary. */
        if (dm_is_crc_source_crtc(source) ||
            source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
                if (!enable) {
                        if (adev->dm.crc_rd_wrk) {
                                flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
                                spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
                                if (adev->dm.crc_rd_wrk->crtc == crtc) {
                                        /* Stop the ROI update on this CRTC before disabling. */
                                        dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc,
                                                                           dm_crtc_state->stream);
                                        adev->dm.crc_rd_wrk->crtc = NULL;
                                }
                                spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
                        }
                }
#endif
                if (!dc_stream_configure_crc(stream_state->ctx->dc,
                                             stream_state, NULL, enable, enable)) {
                        ret = -EINVAL;
                        goto unlock;
                }
        }

        /* Configure dithering */
        if (!dm_need_crc_dither(source)) {
                dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8);
                dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
                                            DYN_EXPANSION_DISABLE);
        } else {
                dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT);
                dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state,
                                            DYN_EXPANSION_AUTO);
        }

unlock:
        mutex_unlock(&adev->dm.dc_lock);

        return ret;
}

int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
        enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
        enum amdgpu_dm_pipe_crc_source cur_crc_src;
        struct drm_crtc_commit *commit;
        struct dm_crtc_state *crtc_state;
        struct drm_device *drm_dev = crtc->dev;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct drm_dp_aux *aux = NULL;
        bool enable = false;
        bool enabled = false;
        int ret = 0;

        if (source < 0) {
                DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
                                 src_name, crtc->index);
                return -EINVAL;
        }

        ret = drm_modeset_lock(&crtc->mutex, NULL);
        if (ret)
                return ret;

        spin_lock(&crtc->commit_lock);
        commit = list_first_entry_or_null(&crtc->commit_list,
                                          struct drm_crtc_commit, commit_entry);
        if (commit)
                drm_crtc_commit_get(commit);
        spin_unlock(&crtc->commit_lock);

        if (commit) {
                /*
                 * Need to wait for all outstanding programming to complete
                 * in commit tail since it can modify CRC related fields and
                 * hardware state. Since we're holding the CRTC lock we're
                 * guaranteed that no other commit work can be queued off
                 * before we modify the state below.
                 */
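                /*
                 * wait_for_completion_interruptible_timeout() returns
                 * -ERESTARTSYS when interrupted, 0 on timeout and the number
                 * of remaining jiffies (> 0) on success, hence the three-way
                 * handling below instead of a plain error check.
                 */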
                ret = wait_for_completion_interruptible_timeout(&commit->hw_done,
                                                                10 * HZ);
                if (ret < 0)
                        goto cleanup;
                if (ret == 0) {
                        ret = -ETIMEDOUT;
                        goto cleanup;
                }
                ret = 0;
        }

        enable = amdgpu_dm_is_valid_crc_source(source);
        crtc_state = to_dm_crtc_state(crtc->state);
        spin_lock_irq(&drm_dev->event_lock);
        cur_crc_src = acrtc->dm_irq_params.crc_src;
        spin_unlock_irq(&drm_dev->event_lock);

        /*
         * USER REQ SRC | CURRENT SRC | BEHAVIOR
         * -------------+-------------+---------------------------------------
         * None         | None        | Do nothing
         * None         | CRTC        | Disable CRTC CRC, set default to dither
         * None         | DPRX        | Disable DPRX CRC, need 'aux', set default to dither
         * None         | CRTC DITHER | Disable CRTC CRC
         * None         | DPRX DITHER | Disable DPRX CRC, need 'aux'
         * CRTC         | XXXX        | Enable CRTC CRC, no dither
         * DPRX         | XXXX        | Enable DPRX CRC, need 'aux', no dither
         * CRTC DITHER  | XXXX        | Enable CRTC CRC, set dither
         * DPRX DITHER  | XXXX        | Enable DPRX CRC, need 'aux', set dither
         */
        if (dm_is_crc_source_dprx(source) ||
            (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
             dm_is_crc_source_dprx(cur_crc_src))) {
                struct amdgpu_dm_connector *aconn = NULL;
                struct drm_connector *connector;
                struct drm_connector_list_iter conn_iter;

                drm_connector_list_iter_begin(crtc->dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        if (!connector->state || connector->state->crtc != crtc)
                                continue;

                        aconn = to_amdgpu_dm_connector(connector);
                        break;
                }
                drm_connector_list_iter_end(&conn_iter);

                if (!aconn) {
                        DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n",
                                         crtc->index);
                        ret = -EINVAL;
                        goto cleanup;
                }

                aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux;

                if (!aux) {
                        DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
                        ret = -EINVAL;
                        goto cleanup;
                }
        }

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        amdgpu_dm_set_crc_window_default(crtc);
#endif

        if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
                ret = -EINVAL;
                goto cleanup;
        }

        /*
         * Reading the CRC requires the vblank interrupt handler to be
         * enabled. Keep a reference until CRC capture stops.
         */
        enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
        if (!enabled && enable) {
                ret = drm_crtc_vblank_get(crtc);
                if (ret)
                        goto cleanup;

                if (dm_is_crc_source_dprx(source)) {
                        if (drm_dp_start_crc(aux, crtc)) {
                                DRM_DEBUG_DRIVER("dp start crc failed\n");
                                ret = -EINVAL;
                                goto cleanup;
                        }
                }
        } else if (enabled && !enable) {
                drm_crtc_vblank_put(crtc);
                /*
                 * The requested source is NONE when disabling, so the
                 * previously active source decides whether DPRX CRC capture
                 * has to be stopped on the panel side; 'aux' was fetched
                 * above for exactly this case.
                 */
                if (dm_is_crc_source_dprx(cur_crc_src)) {
                        if (drm_dp_stop_crc(aux)) {
                                DRM_DEBUG_DRIVER("dp stop crc failed\n");
                                ret = -EINVAL;
                                goto cleanup;
                        }
                }
        }

        spin_lock_irq(&drm_dev->event_lock);
        acrtc->dm_irq_params.crc_src = source;
        spin_unlock_irq(&drm_dev->event_lock);

        /* Reset crc_skipped on dm state */
        crtc_state->crc_skip_count = 0;

cleanup:
        if (commit)
                drm_crtc_commit_put(commit);

        drm_modeset_unlock(&crtc->mutex);

        return ret;
}
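/*
 * Typical userspace flow against the generic DRM CRC debugfs ABI (the paths
 * are illustrative; the DRM minor and CRTC index vary per system):
 *
 *   echo crtc > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *   cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 *
 * Each line of 'data' carries a frame reference followed by values_cnt
 * (three, per amdgpu_dm_crtc_verify_crc_source() above) CRC values, and
 * writing "none" to 'control' stops capture.
 */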
/**
 * amdgpu_dm_crtc_handle_crc_irq: Report to DRM the CRC on given CRTC.
 * @crtc: DRM CRTC object.
 *
 * This function should be called at the end of a vblank, when the fb has been
 * fully processed through the pipe.
 */
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
        struct dm_crtc_state *crtc_state;
        struct dc_stream_state *stream_state;
        struct drm_device *drm_dev = NULL;
        enum amdgpu_dm_pipe_crc_source cur_crc_src;
        struct amdgpu_crtc *acrtc = NULL;
        uint32_t crcs[3];
        unsigned long flags;

        if (crtc == NULL)
                return;

        crtc_state = to_dm_crtc_state(crtc->state);
        stream_state = crtc_state->stream;
        acrtc = to_amdgpu_crtc(crtc);
        drm_dev = crtc->dev;

        spin_lock_irqsave(&drm_dev->event_lock, flags);
        cur_crc_src = acrtc->dm_irq_params.crc_src;
        spin_unlock_irqrestore(&drm_dev->event_lock, flags);

        /* Early return if CRC capture is not enabled. */
        if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
                return;

        /*
         * Since flipping and crc enablement happen asynchronously, we more
         * often than not will be returning an 'uncooked' crc on the first
         * frame, probably because the hw isn't ready yet. To be safe, skip
         * the first two CRC values.
         */
        if (crtc_state->crc_skip_count < 2) {
                crtc_state->crc_skip_count += 1;
                return;
        }

        if (dm_is_crc_source_crtc(cur_crc_src)) {
                if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state,
                                       &crcs[0], &crcs[1], &crcs[2]))
                        return;

                drm_crtc_add_crc_entry(crtc, true,
                                       drm_crtc_accurate_vblank_count(crtc),
                                       crcs);
        }
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
        struct dc_stream_state *stream_state;
        struct drm_device *drm_dev = NULL;
        enum amdgpu_dm_pipe_crc_source cur_crc_src;
        struct amdgpu_crtc *acrtc = NULL;
        struct amdgpu_device *adev = NULL;
        struct crc_rd_work *crc_rd_wrk = NULL;
        struct crc_params *crc_window = NULL, tmp_window;
        unsigned long flags1, flags2;
        struct crtc_position position;
        uint32_t v_blank;
        uint32_t v_back_porch;
        uint32_t crc_window_latch_up_line;
        struct dc_crtc_timing *timing_out;

        if (crtc == NULL)
                return;

        acrtc = to_amdgpu_crtc(crtc);
        adev = drm_to_adev(crtc->dev);
        drm_dev = crtc->dev;

        spin_lock_irqsave(&drm_dev->event_lock, flags1);
        stream_state = acrtc->dm_irq_params.stream;
        cur_crc_src = acrtc->dm_irq_params.crc_src;
        timing_out = &stream_state->timing;
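        /*
         * Two cases are handled below while event_lock is held: a freshly
         * requested ROI (update_win) is forwarded to the DMCU and a skip
         * count is armed so that no CRC is consumed before the new window
         * has latched; once the window is stable, the notify_ta worker is
         * scheduled so the secure display TA can read back the ROI CRC.
         */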
        /* Early return if CRC capture is not enabled. */
        if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
                goto cleanup;

        if (dm_is_crc_source_crtc(cur_crc_src)) {
                if (acrtc->dm_irq_params.crc_window.activated) {
                        if (acrtc->dm_irq_params.crc_window.update_win) {
                                if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) {
                                        acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1;
                                        goto cleanup;
                                }
                                crc_window = &tmp_window;

                                /* ROI windows A and B carry the same rectangle. */
                                tmp_window.windowa_x_start = acrtc->dm_irq_params.crc_window.x_start;
                                tmp_window.windowa_y_start = acrtc->dm_irq_params.crc_window.y_start;
                                tmp_window.windowa_x_end = acrtc->dm_irq_params.crc_window.x_end;
                                tmp_window.windowa_y_end = acrtc->dm_irq_params.crc_window.y_end;
                                tmp_window.windowb_x_start = acrtc->dm_irq_params.crc_window.x_start;
                                tmp_window.windowb_y_start = acrtc->dm_irq_params.crc_window.y_start;
                                tmp_window.windowb_x_end = acrtc->dm_irq_params.crc_window.x_end;
                                tmp_window.windowb_y_end = acrtc->dm_irq_params.crc_window.y_end;

                                dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc,
                                                                  stream_state, crc_window);

                                acrtc->dm_irq_params.crc_window.update_win = false;

                                dc_stream_get_crtc_position(stream_state->ctx->dc,
                                                            &stream_state, 1,
                                                            &position.vertical_count,
                                                            &position.nominal_vcount);

                                v_blank = timing_out->v_total - timing_out->v_border_top -
                                          timing_out->v_addressable - timing_out->v_border_bottom;

                                v_back_porch = v_blank - timing_out->v_front_porch -
                                               timing_out->v_sync_width;

                                crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width;

                                /* Take a three-line margin. */
                                if ((position.vertical_count + 3) >= crc_window_latch_up_line)
                                        acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1;
                                else
                                        acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
                        } else {
                                if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) {
                                        if (adev->dm.crc_rd_wrk) {
                                                crc_rd_wrk = adev->dm.crc_rd_wrk;
                                                spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
                                                crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
                                                spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
                                                schedule_work(&crc_rd_wrk->notify_ta_work);
                                        }
                                } else {
                                        acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1;
                                }
                        }
                }
        }

cleanup:
        spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
}

struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void)
{
        struct crc_rd_work *crc_rd_wrk = NULL;

        crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL);
        if (!crc_rd_wrk)
                return NULL;

        spin_lock_init(&crc_rd_wrk->crc_rd_work_lock);
        INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);

        return crc_rd_wrk;
}
#endif