1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright (C) 2017 Google, Inc. 4 * Copyright _ 2017-2019, Intel Corporation. 5 * 6 * Authors: 7 * Sean Paul <seanpaul@chromium.org> 8 * Ramalingam C <ramalingam.c@intel.com> 9 */ 10 11 #include <linux/component.h> 12 #include <linux/i2c.h> 13 #include <linux/random.h> 14 15 #include <drm/display/drm_hdcp_helper.h> 16 #include <drm/i915_component.h> 17 18 #include "i915_drv.h" 19 #include "i915_reg.h" 20 #include "intel_connector.h" 21 #include "intel_de.h" 22 #include "intel_display_power.h" 23 #include "intel_display_power_well.h" 24 #include "intel_display_types.h" 25 #include "intel_hdcp.h" 26 #include "intel_hdcp_gsc.h" 27 #include "intel_hdcp_regs.h" 28 #include "intel_pcode.h" 29 30 #define KEY_LOAD_TRIES 5 31 #define HDCP2_LC_RETRY_CNT 3 32 33 static int intel_conn_to_vcpi(struct drm_atomic_state *state, 34 struct intel_connector *connector) 35 { 36 struct drm_dp_mst_topology_mgr *mgr; 37 struct drm_dp_mst_atomic_payload *payload; 38 struct drm_dp_mst_topology_state *mst_state; 39 int vcpi = 0; 40 41 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */ 42 if (!connector->port) 43 return 0; 44 mgr = connector->port->mgr; 45 46 drm_modeset_lock(&mgr->base.lock, state->acquire_ctx); 47 mst_state = to_drm_dp_mst_topology_state(mgr->base.state); 48 payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); 49 if (drm_WARN_ON(mgr->dev, !payload)) 50 goto out; 51 52 vcpi = payload->vcpi; 53 if (drm_WARN_ON(mgr->dev, vcpi < 0)) { 54 vcpi = 0; 55 goto out; 56 } 57 out: 58 return vcpi; 59 } 60 61 /* 62 * intel_hdcp_required_content_stream selects the most highest common possible HDCP 63 * content_type for all streams in DP MST topology because security f/w doesn't 64 * have any provision to mark content_type for each stream separately, it marks 65 * all available streams with the content_type proivided at the time of port 66 * authentication. This may prohibit the userspace to use type1 content on 67 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in 68 * DP MST topology. Though it is not compulsory, security fw should change its 69 * policy to mark different content_types for different streams. 70 */ 71 static void 72 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port) 73 { 74 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 75 bool enforce_type0 = false; 76 int k; 77 78 if (dig_port->hdcp_auth_status) 79 return; 80 81 if (!dig_port->hdcp_mst_type1_capable) 82 enforce_type0 = true; 83 84 /* 85 * Apply common protection level across all streams in DP MST Topology. 86 * Use highest supported content type for all streams in DP MST Topology. 87 */ 88 for (k = 0; k < data->k; k++) 89 data->streams[k].stream_type = 90 enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1; 91 } 92 93 static void intel_hdcp_prepare_streams(struct intel_connector *connector) 94 { 95 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 96 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 97 struct intel_hdcp *hdcp = &connector->hdcp; 98 99 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) { 100 data->streams[0].stream_type = hdcp->content_type; 101 } else { 102 intel_hdcp_required_content_stream(dig_port); 103 } 104 } 105 106 static 107 bool intel_hdcp_is_ksv_valid(u8 *ksv) 108 { 109 int i, ones = 0; 110 /* KSV has 20 1's and 20 0's */ 111 for (i = 0; i < DRM_HDCP_KSV_LEN; i++) 112 ones += hweight8(ksv[i]); 113 if (ones != 20) 114 return false; 115 116 return true; 117 } 118 119 static 120 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, 121 const struct intel_hdcp_shim *shim, u8 *bksv) 122 { 123 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 124 int ret, i, tries = 2; 125 126 /* HDCP spec states that we must retry the bksv if it is invalid */ 127 for (i = 0; i < tries; i++) { 128 ret = shim->read_bksv(dig_port, bksv); 129 if (ret) 130 return ret; 131 if (intel_hdcp_is_ksv_valid(bksv)) 132 break; 133 } 134 if (i == tries) { 135 drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); 136 return -ENODEV; 137 } 138 139 return 0; 140 } 141 142 /* Is HDCP1.4 capable on Platform and Sink */ 143 bool intel_hdcp_capable(struct intel_connector *connector) 144 { 145 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 146 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 147 bool capable = false; 148 u8 bksv[5]; 149 150 if (!shim) 151 return capable; 152 153 if (shim->hdcp_capable) { 154 shim->hdcp_capable(dig_port, &capable); 155 } else { 156 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) 157 capable = true; 158 } 159 160 return capable; 161 } 162 163 /* Is HDCP2.2 capable on Platform and Sink */ 164 bool intel_hdcp2_capable(struct intel_connector *connector) 165 { 166 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 167 struct drm_i915_private *i915 = to_i915(connector->base.dev); 168 struct intel_hdcp *hdcp = &connector->hdcp; 169 bool capable = false; 170 171 /* I915 support for HDCP2.2 */ 172 if (!hdcp->hdcp2_supported) 173 return false; 174 175 /* If MTL+ make sure gsc is loaded and proxy is setup */ 176 if (intel_hdcp_gsc_cs_required(i915)) { 177 struct intel_gt *gt = i915->media_gt; 178 struct intel_gsc_uc *gsc = gt ? >->uc.gsc : NULL; 179 180 if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) 181 return false; 182 } 183 184 /* MEI/GSC interface is solid depending on which is used */ 185 mutex_lock(&i915->display.hdcp.hdcp_mutex); 186 if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) { 187 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 188 return false; 189 } 190 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 191 192 /* Sink's capability for HDCP2.2 */ 193 hdcp->shim->hdcp_2_2_capable(dig_port, &capable); 194 195 return capable; 196 } 197 198 static bool intel_hdcp_in_use(struct drm_i915_private *i915, 199 enum transcoder cpu_transcoder, enum port port) 200 { 201 return intel_de_read(i915, 202 HDCP_STATUS(i915, cpu_transcoder, port)) & 203 HDCP_STATUS_ENC; 204 } 205 206 static bool intel_hdcp2_in_use(struct drm_i915_private *i915, 207 enum transcoder cpu_transcoder, enum port port) 208 { 209 return intel_de_read(i915, 210 HDCP2_STATUS(i915, cpu_transcoder, port)) & 211 LINK_ENCRYPTION_STATUS; 212 } 213 214 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, 215 const struct intel_hdcp_shim *shim) 216 { 217 int ret, read_ret; 218 bool ksv_ready; 219 220 /* Poll for ksv list ready (spec says max time allowed is 5s) */ 221 ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, 222 &ksv_ready), 223 read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 224 100 * 1000); 225 if (ret) 226 return ret; 227 if (read_ret) 228 return read_ret; 229 if (!ksv_ready) 230 return -ETIMEDOUT; 231 232 return 0; 233 } 234 235 static bool hdcp_key_loadable(struct drm_i915_private *i915) 236 { 237 enum i915_power_well_id id; 238 intel_wakeref_t wakeref; 239 bool enabled = false; 240 241 /* 242 * On HSW and BDW, Display HW loads the Key as soon as Display resumes. 243 * On all BXT+, SW can load the keys only when the PW#1 is turned on. 244 */ 245 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 246 id = HSW_DISP_PW_GLOBAL; 247 else 248 id = SKL_DISP_PW_1; 249 250 /* PG1 (power well #1) needs to be enabled */ 251 with_intel_runtime_pm(&i915->runtime_pm, wakeref) 252 enabled = intel_display_power_well_is_enabled(i915, id); 253 254 /* 255 * Another req for hdcp key loadability is enabled state of pll for 256 * cdclk. Without active crtc we wont land here. So we are assuming that 257 * cdclk is already on. 258 */ 259 260 return enabled; 261 } 262 263 static void intel_hdcp_clear_keys(struct drm_i915_private *i915) 264 { 265 intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); 266 intel_de_write(i915, HDCP_KEY_STATUS, 267 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); 268 } 269 270 static int intel_hdcp_load_keys(struct drm_i915_private *i915) 271 { 272 int ret; 273 u32 val; 274 275 val = intel_de_read(i915, HDCP_KEY_STATUS); 276 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) 277 return 0; 278 279 /* 280 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes 281 * out of reset. So if Key is not already loaded, its an error state. 282 */ 283 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 284 if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) 285 return -ENXIO; 286 287 /* 288 * Initiate loading the HDCP key from fuses. 289 * 290 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display 291 * version 9 platforms (minus BXT) differ in the key load trigger 292 * process from other platforms. These platforms use the GT Driver 293 * Mailbox interface. 294 */ 295 if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) { 296 ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); 297 if (ret) { 298 drm_err(&i915->drm, 299 "Failed to initiate HDCP key load (%d)\n", 300 ret); 301 return ret; 302 } 303 } else { 304 intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); 305 } 306 307 /* Wait for the keys to load (500us) */ 308 ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS, 309 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 310 10, 1, &val); 311 if (ret) 312 return ret; 313 else if (!(val & HDCP_KEY_LOAD_STATUS)) 314 return -ENXIO; 315 316 /* Send Aksv over to PCH display for use in authentication */ 317 intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); 318 319 return 0; 320 } 321 322 /* Returns updated SHA-1 index */ 323 static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text) 324 { 325 intel_de_write(i915, HDCP_SHA_TEXT, sha_text); 326 if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { 327 drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n"); 328 return -ETIMEDOUT; 329 } 330 return 0; 331 } 332 333 static 334 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915, 335 enum transcoder cpu_transcoder, enum port port) 336 { 337 if (DISPLAY_VER(i915) >= 12) { 338 switch (cpu_transcoder) { 339 case TRANSCODER_A: 340 return HDCP_TRANSA_REP_PRESENT | 341 HDCP_TRANSA_SHA1_M0; 342 case TRANSCODER_B: 343 return HDCP_TRANSB_REP_PRESENT | 344 HDCP_TRANSB_SHA1_M0; 345 case TRANSCODER_C: 346 return HDCP_TRANSC_REP_PRESENT | 347 HDCP_TRANSC_SHA1_M0; 348 case TRANSCODER_D: 349 return HDCP_TRANSD_REP_PRESENT | 350 HDCP_TRANSD_SHA1_M0; 351 default: 352 drm_err(&i915->drm, "Unknown transcoder %d\n", 353 cpu_transcoder); 354 return -EINVAL; 355 } 356 } 357 358 switch (port) { 359 case PORT_A: 360 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; 361 case PORT_B: 362 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; 363 case PORT_C: 364 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; 365 case PORT_D: 366 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; 367 case PORT_E: 368 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; 369 default: 370 drm_err(&i915->drm, "Unknown port %d\n", port); 371 return -EINVAL; 372 } 373 } 374 375 static 376 int intel_hdcp_validate_v_prime(struct intel_connector *connector, 377 const struct intel_hdcp_shim *shim, 378 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 379 { 380 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 381 struct drm_i915_private *i915 = to_i915(connector->base.dev); 382 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 383 enum port port = dig_port->base.port; 384 u32 vprime, sha_text, sha_leftovers, rep_ctl; 385 int ret, i, j, sha_idx; 386 387 /* Process V' values from the receiver */ 388 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { 389 ret = shim->read_v_prime_part(dig_port, i, &vprime); 390 if (ret) 391 return ret; 392 intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime); 393 } 394 395 /* 396 * We need to write the concatenation of all device KSVs, BINFO (DP) || 397 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte 398 * stream is written via the HDCP_SHA_TEXT register in 32-bit 399 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This 400 * index will keep track of our progress through the 64 bytes as well as 401 * helping us work the 40-bit KSVs through our 32-bit register. 402 * 403 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian 404 */ 405 sha_idx = 0; 406 sha_text = 0; 407 sha_leftovers = 0; 408 rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); 409 intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 410 for (i = 0; i < num_downstream; i++) { 411 unsigned int sha_empty; 412 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; 413 414 /* Fill up the empty slots in sha_text and write it out */ 415 sha_empty = sizeof(sha_text) - sha_leftovers; 416 for (j = 0; j < sha_empty; j++) { 417 u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); 418 sha_text |= ksv[j] << off; 419 } 420 421 ret = intel_write_sha_text(i915, sha_text); 422 if (ret < 0) 423 return ret; 424 425 /* Programming guide writes this every 64 bytes */ 426 sha_idx += sizeof(sha_text); 427 if (!(sha_idx % 64)) 428 intel_de_write(i915, HDCP_REP_CTL, 429 rep_ctl | HDCP_SHA1_TEXT_32); 430 431 /* Store the leftover bytes from the ksv in sha_text */ 432 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; 433 sha_text = 0; 434 for (j = 0; j < sha_leftovers; j++) 435 sha_text |= ksv[sha_empty + j] << 436 ((sizeof(sha_text) - j - 1) * 8); 437 438 /* 439 * If we still have room in sha_text for more data, continue. 440 * Otherwise, write it out immediately. 441 */ 442 if (sizeof(sha_text) > sha_leftovers) 443 continue; 444 445 ret = intel_write_sha_text(i915, sha_text); 446 if (ret < 0) 447 return ret; 448 sha_leftovers = 0; 449 sha_text = 0; 450 sha_idx += sizeof(sha_text); 451 } 452 453 /* 454 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many 455 * bytes are leftover from the last ksv, we might be able to fit them 456 * all in sha_text (first 2 cases), or we might need to split them up 457 * into 2 writes (last 2 cases). 458 */ 459 if (sha_leftovers == 0) { 460 /* Write 16 bits of text, 16 bits of M0 */ 461 intel_de_write(i915, HDCP_REP_CTL, 462 rep_ctl | HDCP_SHA1_TEXT_16); 463 ret = intel_write_sha_text(i915, 464 bstatus[0] << 8 | bstatus[1]); 465 if (ret < 0) 466 return ret; 467 sha_idx += sizeof(sha_text); 468 469 /* Write 32 bits of M0 */ 470 intel_de_write(i915, HDCP_REP_CTL, 471 rep_ctl | HDCP_SHA1_TEXT_0); 472 ret = intel_write_sha_text(i915, 0); 473 if (ret < 0) 474 return ret; 475 sha_idx += sizeof(sha_text); 476 477 /* Write 16 bits of M0 */ 478 intel_de_write(i915, HDCP_REP_CTL, 479 rep_ctl | HDCP_SHA1_TEXT_16); 480 ret = intel_write_sha_text(i915, 0); 481 if (ret < 0) 482 return ret; 483 sha_idx += sizeof(sha_text); 484 485 } else if (sha_leftovers == 1) { 486 /* Write 24 bits of text, 8 bits of M0 */ 487 intel_de_write(i915, HDCP_REP_CTL, 488 rep_ctl | HDCP_SHA1_TEXT_24); 489 sha_text |= bstatus[0] << 16 | bstatus[1] << 8; 490 /* Only 24-bits of data, must be in the LSB */ 491 sha_text = (sha_text & 0xffffff00) >> 8; 492 ret = intel_write_sha_text(i915, sha_text); 493 if (ret < 0) 494 return ret; 495 sha_idx += sizeof(sha_text); 496 497 /* Write 32 bits of M0 */ 498 intel_de_write(i915, HDCP_REP_CTL, 499 rep_ctl | HDCP_SHA1_TEXT_0); 500 ret = intel_write_sha_text(i915, 0); 501 if (ret < 0) 502 return ret; 503 sha_idx += sizeof(sha_text); 504 505 /* Write 24 bits of M0 */ 506 intel_de_write(i915, HDCP_REP_CTL, 507 rep_ctl | HDCP_SHA1_TEXT_8); 508 ret = intel_write_sha_text(i915, 0); 509 if (ret < 0) 510 return ret; 511 sha_idx += sizeof(sha_text); 512 513 } else if (sha_leftovers == 2) { 514 /* Write 32 bits of text */ 515 intel_de_write(i915, HDCP_REP_CTL, 516 rep_ctl | HDCP_SHA1_TEXT_32); 517 sha_text |= bstatus[0] << 8 | bstatus[1]; 518 ret = intel_write_sha_text(i915, sha_text); 519 if (ret < 0) 520 return ret; 521 sha_idx += sizeof(sha_text); 522 523 /* Write 64 bits of M0 */ 524 intel_de_write(i915, HDCP_REP_CTL, 525 rep_ctl | HDCP_SHA1_TEXT_0); 526 for (i = 0; i < 2; i++) { 527 ret = intel_write_sha_text(i915, 0); 528 if (ret < 0) 529 return ret; 530 sha_idx += sizeof(sha_text); 531 } 532 533 /* 534 * Terminate the SHA-1 stream by hand. For the other leftover 535 * cases this is appended by the hardware. 536 */ 537 intel_de_write(i915, HDCP_REP_CTL, 538 rep_ctl | HDCP_SHA1_TEXT_32); 539 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; 540 ret = intel_write_sha_text(i915, sha_text); 541 if (ret < 0) 542 return ret; 543 sha_idx += sizeof(sha_text); 544 } else if (sha_leftovers == 3) { 545 /* Write 32 bits of text (filled from LSB) */ 546 intel_de_write(i915, HDCP_REP_CTL, 547 rep_ctl | HDCP_SHA1_TEXT_32); 548 sha_text |= bstatus[0]; 549 ret = intel_write_sha_text(i915, sha_text); 550 if (ret < 0) 551 return ret; 552 sha_idx += sizeof(sha_text); 553 554 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ 555 intel_de_write(i915, HDCP_REP_CTL, 556 rep_ctl | HDCP_SHA1_TEXT_8); 557 ret = intel_write_sha_text(i915, bstatus[1]); 558 if (ret < 0) 559 return ret; 560 sha_idx += sizeof(sha_text); 561 562 /* Write 32 bits of M0 */ 563 intel_de_write(i915, HDCP_REP_CTL, 564 rep_ctl | HDCP_SHA1_TEXT_0); 565 ret = intel_write_sha_text(i915, 0); 566 if (ret < 0) 567 return ret; 568 sha_idx += sizeof(sha_text); 569 570 /* Write 8 bits of M0 */ 571 intel_de_write(i915, HDCP_REP_CTL, 572 rep_ctl | HDCP_SHA1_TEXT_24); 573 ret = intel_write_sha_text(i915, 0); 574 if (ret < 0) 575 return ret; 576 sha_idx += sizeof(sha_text); 577 } else { 578 drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n", 579 sha_leftovers); 580 return -EINVAL; 581 } 582 583 intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 584 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ 585 while ((sha_idx % 64) < (64 - sizeof(sha_text))) { 586 ret = intel_write_sha_text(i915, 0); 587 if (ret < 0) 588 return ret; 589 sha_idx += sizeof(sha_text); 590 } 591 592 /* 593 * Last write gets the length of the concatenation in bits. That is: 594 * - 5 bytes per device 595 * - 10 bytes for BINFO/BSTATUS(2), M0(8) 596 */ 597 sha_text = (num_downstream * 5 + 10) * 8; 598 ret = intel_write_sha_text(i915, sha_text); 599 if (ret < 0) 600 return ret; 601 602 /* Tell the HW we're done with the hash and wait for it to ACK */ 603 intel_de_write(i915, HDCP_REP_CTL, 604 rep_ctl | HDCP_SHA1_COMPLETE_HASH); 605 if (intel_de_wait_for_set(i915, HDCP_REP_CTL, 606 HDCP_SHA1_COMPLETE, 1)) { 607 drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n"); 608 return -ETIMEDOUT; 609 } 610 if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 611 drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n"); 612 return -ENXIO; 613 } 614 615 return 0; 616 } 617 618 /* Implements Part 2 of the HDCP authorization procedure */ 619 static 620 int intel_hdcp_auth_downstream(struct intel_connector *connector) 621 { 622 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 623 struct drm_i915_private *i915 = to_i915(connector->base.dev); 624 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 625 u8 bstatus[2], num_downstream, *ksv_fifo; 626 int ret, i, tries = 3; 627 628 ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); 629 if (ret) { 630 drm_dbg_kms(&i915->drm, 631 "KSV list failed to become ready (%d)\n", ret); 632 return ret; 633 } 634 635 ret = shim->read_bstatus(dig_port, bstatus); 636 if (ret) 637 return ret; 638 639 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 640 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 641 drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n"); 642 return -EPERM; 643 } 644 645 /* 646 * When repeater reports 0 device count, HDCP1.4 spec allows disabling 647 * the HDCP encryption. That implies that repeater can't have its own 648 * display. As there is no consumption of encrypted content in the 649 * repeater with 0 downstream devices, we are failing the 650 * authentication. 651 */ 652 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); 653 if (num_downstream == 0) { 654 drm_dbg_kms(&i915->drm, 655 "Repeater with zero downstream devices\n"); 656 return -EINVAL; 657 } 658 659 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); 660 if (!ksv_fifo) { 661 drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n"); 662 return -ENOMEM; 663 } 664 665 ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo); 666 if (ret) 667 goto err; 668 669 if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo, 670 num_downstream) > 0) { 671 drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n"); 672 ret = -EPERM; 673 goto err; 674 } 675 676 /* 677 * When V prime mismatches, DP Spec mandates re-read of 678 * V prime atleast twice. 679 */ 680 for (i = 0; i < tries; i++) { 681 ret = intel_hdcp_validate_v_prime(connector, shim, 682 ksv_fifo, num_downstream, 683 bstatus); 684 if (!ret) 685 break; 686 } 687 688 if (i == tries) { 689 drm_dbg_kms(&i915->drm, 690 "V Prime validation failed.(%d)\n", ret); 691 goto err; 692 } 693 694 drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n", 695 num_downstream); 696 ret = 0; 697 err: 698 kfree(ksv_fifo); 699 return ret; 700 } 701 702 /* Implements Part 1 of the HDCP authorization procedure */ 703 static int intel_hdcp_auth(struct intel_connector *connector) 704 { 705 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 706 struct drm_i915_private *i915 = to_i915(connector->base.dev); 707 struct intel_hdcp *hdcp = &connector->hdcp; 708 const struct intel_hdcp_shim *shim = hdcp->shim; 709 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 710 enum port port = dig_port->base.port; 711 unsigned long r0_prime_gen_start; 712 int ret, i, tries = 2; 713 union { 714 u32 reg[2]; 715 u8 shim[DRM_HDCP_AN_LEN]; 716 } an; 717 union { 718 u32 reg[2]; 719 u8 shim[DRM_HDCP_KSV_LEN]; 720 } bksv; 721 union { 722 u32 reg; 723 u8 shim[DRM_HDCP_RI_LEN]; 724 } ri; 725 bool repeater_present, hdcp_capable; 726 727 /* 728 * Detects whether the display is HDCP capable. Although we check for 729 * valid Bksv below, the HDCP over DP spec requires that we check 730 * whether the display supports HDCP before we write An. For HDMI 731 * displays, this is not necessary. 732 */ 733 if (shim->hdcp_capable) { 734 ret = shim->hdcp_capable(dig_port, &hdcp_capable); 735 if (ret) 736 return ret; 737 if (!hdcp_capable) { 738 drm_dbg_kms(&i915->drm, 739 "Panel is not HDCP capable\n"); 740 return -EINVAL; 741 } 742 } 743 744 /* Initialize An with 2 random values and acquire it */ 745 for (i = 0; i < 2; i++) 746 intel_de_write(i915, 747 HDCP_ANINIT(i915, cpu_transcoder, port), 748 get_random_u32()); 749 intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 750 HDCP_CONF_CAPTURE_AN); 751 752 /* Wait for An to be acquired */ 753 if (intel_de_wait_for_set(i915, 754 HDCP_STATUS(i915, cpu_transcoder, port), 755 HDCP_STATUS_AN_READY, 1)) { 756 drm_err(&i915->drm, "Timed out waiting for An\n"); 757 return -ETIMEDOUT; 758 } 759 760 an.reg[0] = intel_de_read(i915, 761 HDCP_ANLO(i915, cpu_transcoder, port)); 762 an.reg[1] = intel_de_read(i915, 763 HDCP_ANHI(i915, cpu_transcoder, port)); 764 ret = shim->write_an_aksv(dig_port, an.shim); 765 if (ret) 766 return ret; 767 768 r0_prime_gen_start = jiffies; 769 770 memset(&bksv, 0, sizeof(bksv)); 771 772 ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim); 773 if (ret < 0) 774 return ret; 775 776 if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) { 777 drm_err(&i915->drm, "BKSV is revoked\n"); 778 return -EPERM; 779 } 780 781 intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port), 782 bksv.reg[0]); 783 intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port), 784 bksv.reg[1]); 785 786 ret = shim->repeater_present(dig_port, &repeater_present); 787 if (ret) 788 return ret; 789 if (repeater_present) 790 intel_de_write(i915, HDCP_REP_CTL, 791 intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port)); 792 793 ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); 794 if (ret) 795 return ret; 796 797 intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 798 HDCP_CONF_AUTH_AND_ENC); 799 800 /* Wait for R0 ready */ 801 if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & 802 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { 803 drm_err(&i915->drm, "Timed out waiting for R0 ready\n"); 804 return -ETIMEDOUT; 805 } 806 807 /* 808 * Wait for R0' to become available. The spec says 100ms from Aksv, but 809 * some monitors can take longer than this. We'll set the timeout at 810 * 300ms just to be sure. 811 * 812 * On DP, there's an R0_READY bit available but no such bit 813 * exists on HDMI. Since the upper-bound is the same, we'll just do 814 * the stupid thing instead of polling on one and not the other. 815 */ 816 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); 817 818 tries = 3; 819 820 /* 821 * DP HDCP Spec mandates the two more reattempt to read R0, incase 822 * of R0 mismatch. 823 */ 824 for (i = 0; i < tries; i++) { 825 ri.reg = 0; 826 ret = shim->read_ri_prime(dig_port, ri.shim); 827 if (ret) 828 return ret; 829 intel_de_write(i915, 830 HDCP_RPRIME(i915, cpu_transcoder, port), 831 ri.reg); 832 833 /* Wait for Ri prime match */ 834 if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & 835 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) 836 break; 837 } 838 839 if (i == tries) { 840 drm_dbg_kms(&i915->drm, 841 "Timed out waiting for Ri prime match (%x)\n", 842 intel_de_read(i915, 843 HDCP_STATUS(i915, cpu_transcoder, port))); 844 return -ETIMEDOUT; 845 } 846 847 /* Wait for encryption confirmation */ 848 if (intel_de_wait_for_set(i915, 849 HDCP_STATUS(i915, cpu_transcoder, port), 850 HDCP_STATUS_ENC, 851 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 852 drm_err(&i915->drm, "Timed out waiting for encryption\n"); 853 return -ETIMEDOUT; 854 } 855 856 /* DP MST Auth Part 1 Step 2.a and Step 2.b */ 857 if (shim->stream_encryption) { 858 ret = shim->stream_encryption(connector, true); 859 if (ret) { 860 drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n", 861 connector->base.name, connector->base.base.id); 862 return ret; 863 } 864 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", 865 transcoder_name(hdcp->stream_transcoder)); 866 } 867 868 if (repeater_present) 869 return intel_hdcp_auth_downstream(connector); 870 871 drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n"); 872 return 0; 873 } 874 875 static int _intel_hdcp_disable(struct intel_connector *connector) 876 { 877 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 878 struct drm_i915_private *i915 = to_i915(connector->base.dev); 879 struct intel_hdcp *hdcp = &connector->hdcp; 880 enum port port = dig_port->base.port; 881 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 882 u32 repeater_ctl; 883 int ret; 884 885 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n", 886 connector->base.name, connector->base.base.id); 887 888 if (hdcp->shim->stream_encryption) { 889 ret = hdcp->shim->stream_encryption(connector, false); 890 if (ret) { 891 drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n", 892 connector->base.name, connector->base.base.id); 893 return ret; 894 } 895 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", 896 transcoder_name(hdcp->stream_transcoder)); 897 /* 898 * If there are other connectors on this port using HDCP, 899 * don't disable it until it disabled HDCP encryption for 900 * all connectors in MST topology. 901 */ 902 if (dig_port->num_hdcp_streams > 0) 903 return 0; 904 } 905 906 hdcp->hdcp_encrypted = false; 907 intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0); 908 if (intel_de_wait_for_clear(i915, 909 HDCP_STATUS(i915, cpu_transcoder, port), 910 ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 911 drm_err(&i915->drm, 912 "Failed to disable HDCP, timeout clearing status\n"); 913 return -ETIMEDOUT; 914 } 915 916 repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, 917 port); 918 intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0); 919 920 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); 921 if (ret) { 922 drm_err(&i915->drm, "Failed to disable HDCP signalling\n"); 923 return ret; 924 } 925 926 drm_dbg_kms(&i915->drm, "HDCP is disabled\n"); 927 return 0; 928 } 929 930 static int _intel_hdcp_enable(struct intel_connector *connector) 931 { 932 struct drm_i915_private *i915 = to_i915(connector->base.dev); 933 struct intel_hdcp *hdcp = &connector->hdcp; 934 int i, ret, tries = 3; 935 936 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n", 937 connector->base.name, connector->base.base.id); 938 939 if (!hdcp_key_loadable(i915)) { 940 drm_err(&i915->drm, "HDCP key Load is not possible\n"); 941 return -ENXIO; 942 } 943 944 for (i = 0; i < KEY_LOAD_TRIES; i++) { 945 ret = intel_hdcp_load_keys(i915); 946 if (!ret) 947 break; 948 intel_hdcp_clear_keys(i915); 949 } 950 if (ret) { 951 drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n", 952 ret); 953 return ret; 954 } 955 956 /* Incase of authentication failures, HDCP spec expects reauth. */ 957 for (i = 0; i < tries; i++) { 958 ret = intel_hdcp_auth(connector); 959 if (!ret) { 960 hdcp->hdcp_encrypted = true; 961 return 0; 962 } 963 964 drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret); 965 966 /* Ensuring HDCP encryption and signalling are stopped. */ 967 _intel_hdcp_disable(connector); 968 } 969 970 drm_dbg_kms(&i915->drm, 971 "HDCP authentication failed (%d tries/%d)\n", tries, ret); 972 return ret; 973 } 974 975 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) 976 { 977 return container_of(hdcp, struct intel_connector, hdcp); 978 } 979 980 static void intel_hdcp_update_value(struct intel_connector *connector, 981 u64 value, bool update_property) 982 { 983 struct drm_device *dev = connector->base.dev; 984 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 985 struct intel_hdcp *hdcp = &connector->hdcp; 986 struct drm_i915_private *i915 = to_i915(connector->base.dev); 987 988 drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex)); 989 990 if (hdcp->value == value) 991 return; 992 993 drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex)); 994 995 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 996 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0)) 997 dig_port->num_hdcp_streams--; 998 } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 999 dig_port->num_hdcp_streams++; 1000 } 1001 1002 hdcp->value = value; 1003 if (update_property) { 1004 drm_connector_get(&connector->base); 1005 queue_work(i915->unordered_wq, &hdcp->prop_work); 1006 } 1007 } 1008 1009 /* Implements Part 3 of the HDCP authorization procedure */ 1010 static int intel_hdcp_check_link(struct intel_connector *connector) 1011 { 1012 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1013 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1014 struct intel_hdcp *hdcp = &connector->hdcp; 1015 enum port port = dig_port->base.port; 1016 enum transcoder cpu_transcoder; 1017 int ret = 0; 1018 1019 mutex_lock(&hdcp->mutex); 1020 mutex_lock(&dig_port->hdcp_mutex); 1021 1022 cpu_transcoder = hdcp->cpu_transcoder; 1023 1024 /* Check_link valid only when HDCP1.4 is enabled */ 1025 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 1026 !hdcp->hdcp_encrypted) { 1027 ret = -EINVAL; 1028 goto out; 1029 } 1030 1031 if (drm_WARN_ON(&i915->drm, 1032 !intel_hdcp_in_use(i915, cpu_transcoder, port))) { 1033 drm_err(&i915->drm, 1034 "%s:%d HDCP link stopped encryption,%x\n", 1035 connector->base.name, connector->base.base.id, 1036 intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port))); 1037 ret = -ENXIO; 1038 intel_hdcp_update_value(connector, 1039 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1040 true); 1041 goto out; 1042 } 1043 1044 if (hdcp->shim->check_link(dig_port, connector)) { 1045 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 1046 intel_hdcp_update_value(connector, 1047 DRM_MODE_CONTENT_PROTECTION_ENABLED, true); 1048 } 1049 goto out; 1050 } 1051 1052 drm_dbg_kms(&i915->drm, 1053 "[%s:%d] HDCP link failed, retrying authentication\n", 1054 connector->base.name, connector->base.base.id); 1055 1056 ret = _intel_hdcp_disable(connector); 1057 if (ret) { 1058 drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret); 1059 intel_hdcp_update_value(connector, 1060 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1061 true); 1062 goto out; 1063 } 1064 1065 ret = _intel_hdcp_enable(connector); 1066 if (ret) { 1067 drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret); 1068 intel_hdcp_update_value(connector, 1069 DRM_MODE_CONTENT_PROTECTION_DESIRED, 1070 true); 1071 goto out; 1072 } 1073 1074 out: 1075 mutex_unlock(&dig_port->hdcp_mutex); 1076 mutex_unlock(&hdcp->mutex); 1077 return ret; 1078 } 1079 1080 static void intel_hdcp_prop_work(struct work_struct *work) 1081 { 1082 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, 1083 prop_work); 1084 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 1085 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1086 1087 drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL); 1088 mutex_lock(&hdcp->mutex); 1089 1090 /* 1091 * This worker is only used to flip between ENABLED/DESIRED. Either of 1092 * those to UNDESIRED is handled by core. If value == UNDESIRED, 1093 * we're running just after hdcp has been disabled, so just exit 1094 */ 1095 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 1096 drm_hdcp_update_content_protection(&connector->base, 1097 hdcp->value); 1098 1099 mutex_unlock(&hdcp->mutex); 1100 drm_modeset_unlock(&i915->drm.mode_config.connection_mutex); 1101 1102 drm_connector_put(&connector->base); 1103 } 1104 1105 bool is_hdcp_supported(struct drm_i915_private *i915, enum port port) 1106 { 1107 return DISPLAY_RUNTIME_INFO(i915)->has_hdcp && 1108 (DISPLAY_VER(i915) >= 12 || port < PORT_E); 1109 } 1110 1111 static int 1112 hdcp2_prepare_ake_init(struct intel_connector *connector, 1113 struct hdcp2_ake_init *ake_data) 1114 { 1115 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1116 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1117 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1118 struct i915_hdcp_arbiter *arbiter; 1119 int ret; 1120 1121 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1122 arbiter = i915->display.hdcp.arbiter; 1123 1124 if (!arbiter || !arbiter->ops) { 1125 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1126 return -EINVAL; 1127 } 1128 1129 ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data); 1130 if (ret) 1131 drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n", 1132 ret); 1133 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1134 1135 return ret; 1136 } 1137 1138 static int 1139 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, 1140 struct hdcp2_ake_send_cert *rx_cert, 1141 bool *paired, 1142 struct hdcp2_ake_no_stored_km *ek_pub_km, 1143 size_t *msg_sz) 1144 { 1145 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1146 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1147 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1148 struct i915_hdcp_arbiter *arbiter; 1149 int ret; 1150 1151 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1152 arbiter = i915->display.hdcp.arbiter; 1153 1154 if (!arbiter || !arbiter->ops) { 1155 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1156 return -EINVAL; 1157 } 1158 1159 ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data, 1160 rx_cert, paired, 1161 ek_pub_km, msg_sz); 1162 if (ret < 0) 1163 drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n", 1164 ret); 1165 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1166 1167 return ret; 1168 } 1169 1170 static int hdcp2_verify_hprime(struct intel_connector *connector, 1171 struct hdcp2_ake_send_hprime *rx_hprime) 1172 { 1173 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1174 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1175 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1176 struct i915_hdcp_arbiter *arbiter; 1177 int ret; 1178 1179 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1180 arbiter = i915->display.hdcp.arbiter; 1181 1182 if (!arbiter || !arbiter->ops) { 1183 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1184 return -EINVAL; 1185 } 1186 1187 ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); 1188 if (ret < 0) 1189 drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret); 1190 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1191 1192 return ret; 1193 } 1194 1195 static int 1196 hdcp2_store_pairing_info(struct intel_connector *connector, 1197 struct hdcp2_ake_send_pairing_info *pairing_info) 1198 { 1199 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1200 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1201 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1202 struct i915_hdcp_arbiter *arbiter; 1203 int ret; 1204 1205 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1206 arbiter = i915->display.hdcp.arbiter; 1207 1208 if (!arbiter || !arbiter->ops) { 1209 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1210 return -EINVAL; 1211 } 1212 1213 ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); 1214 if (ret < 0) 1215 drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n", 1216 ret); 1217 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1218 1219 return ret; 1220 } 1221 1222 static int 1223 hdcp2_prepare_lc_init(struct intel_connector *connector, 1224 struct hdcp2_lc_init *lc_init) 1225 { 1226 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1227 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1228 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1229 struct i915_hdcp_arbiter *arbiter; 1230 int ret; 1231 1232 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1233 arbiter = i915->display.hdcp.arbiter; 1234 1235 if (!arbiter || !arbiter->ops) { 1236 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1237 return -EINVAL; 1238 } 1239 1240 ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); 1241 if (ret < 0) 1242 drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n", 1243 ret); 1244 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1245 1246 return ret; 1247 } 1248 1249 static int 1250 hdcp2_verify_lprime(struct intel_connector *connector, 1251 struct hdcp2_lc_send_lprime *rx_lprime) 1252 { 1253 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1254 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1255 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1256 struct i915_hdcp_arbiter *arbiter; 1257 int ret; 1258 1259 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1260 arbiter = i915->display.hdcp.arbiter; 1261 1262 if (!arbiter || !arbiter->ops) { 1263 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1264 return -EINVAL; 1265 } 1266 1267 ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); 1268 if (ret < 0) 1269 drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n", 1270 ret); 1271 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1272 1273 return ret; 1274 } 1275 1276 static int hdcp2_prepare_skey(struct intel_connector *connector, 1277 struct hdcp2_ske_send_eks *ske_data) 1278 { 1279 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1280 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1281 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1282 struct i915_hdcp_arbiter *arbiter; 1283 int ret; 1284 1285 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1286 arbiter = i915->display.hdcp.arbiter; 1287 1288 if (!arbiter || !arbiter->ops) { 1289 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1290 return -EINVAL; 1291 } 1292 1293 ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); 1294 if (ret < 0) 1295 drm_dbg_kms(&i915->drm, "Get session key failed. %d\n", 1296 ret); 1297 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1298 1299 return ret; 1300 } 1301 1302 static int 1303 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, 1304 struct hdcp2_rep_send_receiverid_list 1305 *rep_topology, 1306 struct hdcp2_rep_send_ack *rep_send_ack) 1307 { 1308 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1309 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1310 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1311 struct i915_hdcp_arbiter *arbiter; 1312 int ret; 1313 1314 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1315 arbiter = i915->display.hdcp.arbiter; 1316 1317 if (!arbiter || !arbiter->ops) { 1318 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1319 return -EINVAL; 1320 } 1321 1322 ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev, 1323 data, 1324 rep_topology, 1325 rep_send_ack); 1326 if (ret < 0) 1327 drm_dbg_kms(&i915->drm, 1328 "Verify rep topology failed. %d\n", ret); 1329 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1330 1331 return ret; 1332 } 1333 1334 static int 1335 hdcp2_verify_mprime(struct intel_connector *connector, 1336 struct hdcp2_rep_stream_ready *stream_ready) 1337 { 1338 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1339 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1340 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1341 struct i915_hdcp_arbiter *arbiter; 1342 int ret; 1343 1344 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1345 arbiter = i915->display.hdcp.arbiter; 1346 1347 if (!arbiter || !arbiter->ops) { 1348 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1349 return -EINVAL; 1350 } 1351 1352 ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); 1353 if (ret < 0) 1354 drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret); 1355 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1356 1357 return ret; 1358 } 1359 1360 static int hdcp2_authenticate_port(struct intel_connector *connector) 1361 { 1362 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1363 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1364 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1365 struct i915_hdcp_arbiter *arbiter; 1366 int ret; 1367 1368 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1369 arbiter = i915->display.hdcp.arbiter; 1370 1371 if (!arbiter || !arbiter->ops) { 1372 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1373 return -EINVAL; 1374 } 1375 1376 ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); 1377 if (ret < 0) 1378 drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n", 1379 ret); 1380 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1381 1382 return ret; 1383 } 1384 1385 static int hdcp2_close_session(struct intel_connector *connector) 1386 { 1387 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1388 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1389 struct i915_hdcp_arbiter *arbiter; 1390 int ret; 1391 1392 mutex_lock(&i915->display.hdcp.hdcp_mutex); 1393 arbiter = i915->display.hdcp.arbiter; 1394 1395 if (!arbiter || !arbiter->ops) { 1396 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1397 return -EINVAL; 1398 } 1399 1400 ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, 1401 &dig_port->hdcp_port_data); 1402 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 1403 1404 return ret; 1405 } 1406 1407 static int hdcp2_deauthenticate_port(struct intel_connector *connector) 1408 { 1409 return hdcp2_close_session(connector); 1410 } 1411 1412 /* Authentication flow starts from here */ 1413 static int hdcp2_authentication_key_exchange(struct intel_connector *connector) 1414 { 1415 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1416 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1417 struct intel_hdcp *hdcp = &connector->hdcp; 1418 union { 1419 struct hdcp2_ake_init ake_init; 1420 struct hdcp2_ake_send_cert send_cert; 1421 struct hdcp2_ake_no_stored_km no_stored_km; 1422 struct hdcp2_ake_send_hprime send_hprime; 1423 struct hdcp2_ake_send_pairing_info pairing_info; 1424 } msgs; 1425 const struct intel_hdcp_shim *shim = hdcp->shim; 1426 size_t size; 1427 int ret; 1428 1429 /* Init for seq_num */ 1430 hdcp->seq_num_v = 0; 1431 hdcp->seq_num_m = 0; 1432 1433 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); 1434 if (ret < 0) 1435 return ret; 1436 1437 ret = shim->write_2_2_msg(dig_port, &msgs.ake_init, 1438 sizeof(msgs.ake_init)); 1439 if (ret < 0) 1440 return ret; 1441 1442 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT, 1443 &msgs.send_cert, sizeof(msgs.send_cert)); 1444 if (ret < 0) 1445 return ret; 1446 1447 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { 1448 drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n"); 1449 return -EINVAL; 1450 } 1451 1452 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); 1453 1454 if (drm_hdcp_check_ksvs_revoked(&i915->drm, 1455 msgs.send_cert.cert_rx.receiver_id, 1456 1) > 0) { 1457 drm_err(&i915->drm, "Receiver ID is revoked\n"); 1458 return -EPERM; 1459 } 1460 1461 /* 1462 * Here msgs.no_stored_km will hold msgs corresponding to the km 1463 * stored also. 1464 */ 1465 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, 1466 &hdcp->is_paired, 1467 &msgs.no_stored_km, &size); 1468 if (ret < 0) 1469 return ret; 1470 1471 ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size); 1472 if (ret < 0) 1473 return ret; 1474 1475 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME, 1476 &msgs.send_hprime, sizeof(msgs.send_hprime)); 1477 if (ret < 0) 1478 return ret; 1479 1480 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); 1481 if (ret < 0) 1482 return ret; 1483 1484 if (!hdcp->is_paired) { 1485 /* Pairing is required */ 1486 ret = shim->read_2_2_msg(dig_port, 1487 HDCP_2_2_AKE_SEND_PAIRING_INFO, 1488 &msgs.pairing_info, 1489 sizeof(msgs.pairing_info)); 1490 if (ret < 0) 1491 return ret; 1492 1493 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); 1494 if (ret < 0) 1495 return ret; 1496 hdcp->is_paired = true; 1497 } 1498 1499 return 0; 1500 } 1501 1502 static int hdcp2_locality_check(struct intel_connector *connector) 1503 { 1504 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1505 struct intel_hdcp *hdcp = &connector->hdcp; 1506 union { 1507 struct hdcp2_lc_init lc_init; 1508 struct hdcp2_lc_send_lprime send_lprime; 1509 } msgs; 1510 const struct intel_hdcp_shim *shim = hdcp->shim; 1511 int tries = HDCP2_LC_RETRY_CNT, ret, i; 1512 1513 for (i = 0; i < tries; i++) { 1514 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); 1515 if (ret < 0) 1516 continue; 1517 1518 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init, 1519 sizeof(msgs.lc_init)); 1520 if (ret < 0) 1521 continue; 1522 1523 ret = shim->read_2_2_msg(dig_port, 1524 HDCP_2_2_LC_SEND_LPRIME, 1525 &msgs.send_lprime, 1526 sizeof(msgs.send_lprime)); 1527 if (ret < 0) 1528 continue; 1529 1530 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); 1531 if (!ret) 1532 break; 1533 } 1534 1535 return ret; 1536 } 1537 1538 static int hdcp2_session_key_exchange(struct intel_connector *connector) 1539 { 1540 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1541 struct intel_hdcp *hdcp = &connector->hdcp; 1542 struct hdcp2_ske_send_eks send_eks; 1543 int ret; 1544 1545 ret = hdcp2_prepare_skey(connector, &send_eks); 1546 if (ret < 0) 1547 return ret; 1548 1549 ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks, 1550 sizeof(send_eks)); 1551 if (ret < 0) 1552 return ret; 1553 1554 return 0; 1555 } 1556 1557 static 1558 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1559 { 1560 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1561 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1562 struct intel_hdcp *hdcp = &connector->hdcp; 1563 union { 1564 struct hdcp2_rep_stream_manage stream_manage; 1565 struct hdcp2_rep_stream_ready stream_ready; 1566 } msgs; 1567 const struct intel_hdcp_shim *shim = hdcp->shim; 1568 int ret, streams_size_delta, i; 1569 1570 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) 1571 return -ERANGE; 1572 1573 /* Prepare RepeaterAuth_Stream_Manage msg */ 1574 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; 1575 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); 1576 1577 msgs.stream_manage.k = cpu_to_be16(data->k); 1578 1579 for (i = 0; i < data->k; i++) { 1580 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id; 1581 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type; 1582 } 1583 1584 streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) * 1585 sizeof(struct hdcp2_streamid_type); 1586 /* Send it to Repeater */ 1587 ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage, 1588 sizeof(msgs.stream_manage) - streams_size_delta); 1589 if (ret < 0) 1590 goto out; 1591 1592 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY, 1593 &msgs.stream_ready, sizeof(msgs.stream_ready)); 1594 if (ret < 0) 1595 goto out; 1596 1597 data->seq_num_m = hdcp->seq_num_m; 1598 1599 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); 1600 1601 out: 1602 hdcp->seq_num_m++; 1603 1604 return ret; 1605 } 1606 1607 static 1608 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) 1609 { 1610 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1611 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1612 struct intel_hdcp *hdcp = &connector->hdcp; 1613 union { 1614 struct hdcp2_rep_send_receiverid_list recvid_list; 1615 struct hdcp2_rep_send_ack rep_ack; 1616 } msgs; 1617 const struct intel_hdcp_shim *shim = hdcp->shim; 1618 u32 seq_num_v, device_cnt; 1619 u8 *rx_info; 1620 int ret; 1621 1622 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, 1623 &msgs.recvid_list, sizeof(msgs.recvid_list)); 1624 if (ret < 0) 1625 return ret; 1626 1627 rx_info = msgs.recvid_list.rx_info; 1628 1629 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || 1630 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { 1631 drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n"); 1632 return -EINVAL; 1633 } 1634 1635 /* 1636 * MST topology is not Type 1 capable if it contains a downstream 1637 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant. 1638 */ 1639 dig_port->hdcp_mst_type1_capable = 1640 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) && 1641 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]); 1642 1643 /* Converting and Storing the seq_num_v to local variable as DWORD */ 1644 seq_num_v = 1645 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); 1646 1647 if (!hdcp->hdcp2_encrypted && seq_num_v) { 1648 drm_dbg_kms(&i915->drm, 1649 "Non zero Seq_num_v at first RecvId_List msg\n"); 1650 return -EINVAL; 1651 } 1652 1653 if (seq_num_v < hdcp->seq_num_v) { 1654 /* Roll over of the seq_num_v from repeater. Reauthenticate. */ 1655 drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n"); 1656 return -EINVAL; 1657 } 1658 1659 device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 1660 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 1661 if (drm_hdcp_check_ksvs_revoked(&i915->drm, 1662 msgs.recvid_list.receiver_ids, 1663 device_cnt) > 0) { 1664 drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n"); 1665 return -EPERM; 1666 } 1667 1668 ret = hdcp2_verify_rep_topology_prepare_ack(connector, 1669 &msgs.recvid_list, 1670 &msgs.rep_ack); 1671 if (ret < 0) 1672 return ret; 1673 1674 hdcp->seq_num_v = seq_num_v; 1675 ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack, 1676 sizeof(msgs.rep_ack)); 1677 if (ret < 0) 1678 return ret; 1679 1680 return 0; 1681 } 1682 1683 static int hdcp2_authenticate_sink(struct intel_connector *connector) 1684 { 1685 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1686 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1687 struct intel_hdcp *hdcp = &connector->hdcp; 1688 const struct intel_hdcp_shim *shim = hdcp->shim; 1689 int ret; 1690 1691 ret = hdcp2_authentication_key_exchange(connector); 1692 if (ret < 0) { 1693 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret); 1694 return ret; 1695 } 1696 1697 ret = hdcp2_locality_check(connector); 1698 if (ret < 0) { 1699 drm_dbg_kms(&i915->drm, 1700 "Locality Check failed. Err : %d\n", ret); 1701 return ret; 1702 } 1703 1704 ret = hdcp2_session_key_exchange(connector); 1705 if (ret < 0) { 1706 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret); 1707 return ret; 1708 } 1709 1710 if (shim->config_stream_type) { 1711 ret = shim->config_stream_type(dig_port, 1712 hdcp->is_repeater, 1713 hdcp->content_type); 1714 if (ret < 0) 1715 return ret; 1716 } 1717 1718 if (hdcp->is_repeater) { 1719 ret = hdcp2_authenticate_repeater_topology(connector); 1720 if (ret < 0) { 1721 drm_dbg_kms(&i915->drm, 1722 "Repeater Auth Failed. Err: %d\n", ret); 1723 return ret; 1724 } 1725 } 1726 1727 return ret; 1728 } 1729 1730 static int hdcp2_enable_stream_encryption(struct intel_connector *connector) 1731 { 1732 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1733 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1734 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1735 struct intel_hdcp *hdcp = &connector->hdcp; 1736 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1737 enum port port = dig_port->base.port; 1738 int ret = 0; 1739 1740 if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & 1741 LINK_ENCRYPTION_STATUS)) { 1742 drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n", 1743 connector->base.name, connector->base.base.id); 1744 ret = -EPERM; 1745 goto link_recover; 1746 } 1747 1748 if (hdcp->shim->stream_2_2_encryption) { 1749 ret = hdcp->shim->stream_2_2_encryption(connector, true); 1750 if (ret) { 1751 drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n", 1752 connector->base.name, connector->base.base.id); 1753 return ret; 1754 } 1755 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n", 1756 transcoder_name(hdcp->stream_transcoder)); 1757 } 1758 1759 return 0; 1760 1761 link_recover: 1762 if (hdcp2_deauthenticate_port(connector) < 0) 1763 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1764 1765 dig_port->hdcp_auth_status = false; 1766 data->k = 0; 1767 1768 return ret; 1769 } 1770 1771 static int hdcp2_enable_encryption(struct intel_connector *connector) 1772 { 1773 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1774 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1775 struct intel_hdcp *hdcp = &connector->hdcp; 1776 enum port port = dig_port->base.port; 1777 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1778 int ret; 1779 1780 drm_WARN_ON(&i915->drm, 1781 intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & 1782 LINK_ENCRYPTION_STATUS); 1783 if (hdcp->shim->toggle_signalling) { 1784 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, 1785 true); 1786 if (ret) { 1787 drm_err(&i915->drm, 1788 "Failed to enable HDCP signalling. %d\n", 1789 ret); 1790 return ret; 1791 } 1792 } 1793 1794 if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & 1795 LINK_AUTH_STATUS) 1796 /* Link is Authenticated. Now set for Encryption */ 1797 intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), 1798 0, CTL_LINK_ENCRYPTION_REQ); 1799 1800 ret = intel_de_wait_for_set(i915, 1801 HDCP2_STATUS(i915, cpu_transcoder, 1802 port), 1803 LINK_ENCRYPTION_STATUS, 1804 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1805 dig_port->hdcp_auth_status = true; 1806 1807 return ret; 1808 } 1809 1810 static int hdcp2_disable_encryption(struct intel_connector *connector) 1811 { 1812 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1813 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1814 struct intel_hdcp *hdcp = &connector->hdcp; 1815 enum port port = dig_port->base.port; 1816 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1817 int ret; 1818 1819 drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) & 1820 LINK_ENCRYPTION_STATUS)); 1821 1822 intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port), 1823 CTL_LINK_ENCRYPTION_REQ, 0); 1824 1825 ret = intel_de_wait_for_clear(i915, 1826 HDCP2_STATUS(i915, cpu_transcoder, 1827 port), 1828 LINK_ENCRYPTION_STATUS, 1829 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1830 if (ret == -ETIMEDOUT) 1831 drm_dbg_kms(&i915->drm, "Disable Encryption Timedout"); 1832 1833 if (hdcp->shim->toggle_signalling) { 1834 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, 1835 false); 1836 if (ret) { 1837 drm_err(&i915->drm, 1838 "Failed to disable HDCP signalling. %d\n", 1839 ret); 1840 return ret; 1841 } 1842 } 1843 1844 return ret; 1845 } 1846 1847 static int 1848 hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1849 { 1850 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1851 int i, tries = 3, ret; 1852 1853 if (!connector->hdcp.is_repeater) 1854 return 0; 1855 1856 for (i = 0; i < tries; i++) { 1857 ret = _hdcp2_propagate_stream_management_info(connector); 1858 if (!ret) 1859 break; 1860 1861 /* Lets restart the auth incase of seq_num_m roll over */ 1862 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { 1863 drm_dbg_kms(&i915->drm, 1864 "seq_num_m roll over.(%d)\n", ret); 1865 break; 1866 } 1867 1868 drm_dbg_kms(&i915->drm, 1869 "HDCP2 stream management %d of %d Failed.(%d)\n", 1870 i + 1, tries, ret); 1871 } 1872 1873 return ret; 1874 } 1875 1876 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) 1877 { 1878 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1879 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1880 int ret = 0, i, tries = 3; 1881 1882 for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) { 1883 ret = hdcp2_authenticate_sink(connector); 1884 if (!ret) { 1885 intel_hdcp_prepare_streams(connector); 1886 1887 ret = hdcp2_propagate_stream_management_info(connector); 1888 if (ret) { 1889 drm_dbg_kms(&i915->drm, 1890 "Stream management failed.(%d)\n", 1891 ret); 1892 break; 1893 } 1894 1895 ret = hdcp2_authenticate_port(connector); 1896 if (!ret) 1897 break; 1898 drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n", 1899 ret); 1900 } 1901 1902 /* Clearing the mei hdcp session */ 1903 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", 1904 i + 1, tries, ret); 1905 if (hdcp2_deauthenticate_port(connector) < 0) 1906 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1907 } 1908 1909 if (!ret && !dig_port->hdcp_auth_status) { 1910 /* 1911 * Ensuring the required 200mSec min time interval between 1912 * Session Key Exchange and encryption. 1913 */ 1914 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); 1915 ret = hdcp2_enable_encryption(connector); 1916 if (ret < 0) { 1917 drm_dbg_kms(&i915->drm, 1918 "Encryption Enable Failed.(%d)\n", ret); 1919 if (hdcp2_deauthenticate_port(connector) < 0) 1920 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1921 } 1922 } 1923 1924 if (!ret) 1925 ret = hdcp2_enable_stream_encryption(connector); 1926 1927 return ret; 1928 } 1929 1930 static int _intel_hdcp2_enable(struct intel_connector *connector) 1931 { 1932 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1933 struct intel_hdcp *hdcp = &connector->hdcp; 1934 int ret; 1935 1936 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n", 1937 connector->base.name, connector->base.base.id, 1938 hdcp->content_type); 1939 1940 ret = hdcp2_authenticate_and_encrypt(connector); 1941 if (ret) { 1942 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", 1943 hdcp->content_type, ret); 1944 return ret; 1945 } 1946 1947 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n", 1948 connector->base.name, connector->base.base.id, 1949 hdcp->content_type); 1950 1951 hdcp->hdcp2_encrypted = true; 1952 return 0; 1953 } 1954 1955 static int 1956 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery) 1957 { 1958 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1959 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1960 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1961 struct intel_hdcp *hdcp = &connector->hdcp; 1962 int ret; 1963 1964 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n", 1965 connector->base.name, connector->base.base.id); 1966 1967 if (hdcp->shim->stream_2_2_encryption) { 1968 ret = hdcp->shim->stream_2_2_encryption(connector, false); 1969 if (ret) { 1970 drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n", 1971 connector->base.name, connector->base.base.id); 1972 return ret; 1973 } 1974 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n", 1975 transcoder_name(hdcp->stream_transcoder)); 1976 1977 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery) 1978 return 0; 1979 } 1980 1981 ret = hdcp2_disable_encryption(connector); 1982 1983 if (hdcp2_deauthenticate_port(connector) < 0) 1984 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1985 1986 connector->hdcp.hdcp2_encrypted = false; 1987 dig_port->hdcp_auth_status = false; 1988 data->k = 0; 1989 1990 return ret; 1991 } 1992 1993 /* Implements the Link Integrity Check for HDCP2.2 */ 1994 static int intel_hdcp2_check_link(struct intel_connector *connector) 1995 { 1996 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1997 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1998 struct intel_hdcp *hdcp = &connector->hdcp; 1999 enum port port = dig_port->base.port; 2000 enum transcoder cpu_transcoder; 2001 int ret = 0; 2002 2003 mutex_lock(&hdcp->mutex); 2004 mutex_lock(&dig_port->hdcp_mutex); 2005 cpu_transcoder = hdcp->cpu_transcoder; 2006 2007 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ 2008 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 2009 !hdcp->hdcp2_encrypted) { 2010 ret = -EINVAL; 2011 goto out; 2012 } 2013 2014 if (drm_WARN_ON(&i915->drm, 2015 !intel_hdcp2_in_use(i915, cpu_transcoder, port))) { 2016 drm_err(&i915->drm, 2017 "HDCP2.2 link stopped the encryption, %x\n", 2018 intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port))); 2019 ret = -ENXIO; 2020 _intel_hdcp2_disable(connector, true); 2021 intel_hdcp_update_value(connector, 2022 DRM_MODE_CONTENT_PROTECTION_DESIRED, 2023 true); 2024 goto out; 2025 } 2026 2027 ret = hdcp->shim->check_2_2_link(dig_port, connector); 2028 if (ret == HDCP_LINK_PROTECTED) { 2029 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 2030 intel_hdcp_update_value(connector, 2031 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2032 true); 2033 } 2034 goto out; 2035 } 2036 2037 if (ret == HDCP_TOPOLOGY_CHANGE) { 2038 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2039 goto out; 2040 2041 drm_dbg_kms(&i915->drm, 2042 "HDCP2.2 Downstream topology change\n"); 2043 ret = hdcp2_authenticate_repeater_topology(connector); 2044 if (!ret) { 2045 intel_hdcp_update_value(connector, 2046 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2047 true); 2048 goto out; 2049 } 2050 drm_dbg_kms(&i915->drm, 2051 "[%s:%d] Repeater topology auth failed.(%d)\n", 2052 connector->base.name, connector->base.base.id, 2053 ret); 2054 } else { 2055 drm_dbg_kms(&i915->drm, 2056 "[%s:%d] HDCP2.2 link failed, retrying auth\n", 2057 connector->base.name, connector->base.base.id); 2058 } 2059 2060 ret = _intel_hdcp2_disable(connector, true); 2061 if (ret) { 2062 drm_err(&i915->drm, 2063 "[%s:%d] Failed to disable hdcp2.2 (%d)\n", 2064 connector->base.name, connector->base.base.id, ret); 2065 intel_hdcp_update_value(connector, 2066 DRM_MODE_CONTENT_PROTECTION_DESIRED, true); 2067 goto out; 2068 } 2069 2070 ret = _intel_hdcp2_enable(connector); 2071 if (ret) { 2072 drm_dbg_kms(&i915->drm, 2073 "[%s:%d] Failed to enable hdcp2.2 (%d)\n", 2074 connector->base.name, connector->base.base.id, 2075 ret); 2076 intel_hdcp_update_value(connector, 2077 DRM_MODE_CONTENT_PROTECTION_DESIRED, 2078 true); 2079 goto out; 2080 } 2081 2082 out: 2083 mutex_unlock(&dig_port->hdcp_mutex); 2084 mutex_unlock(&hdcp->mutex); 2085 return ret; 2086 } 2087 2088 static void intel_hdcp_check_work(struct work_struct *work) 2089 { 2090 struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 2091 struct intel_hdcp, 2092 check_work); 2093 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 2094 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2095 2096 if (drm_connector_is_unregistered(&connector->base)) 2097 return; 2098 2099 if (!intel_hdcp2_check_link(connector)) 2100 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2101 DRM_HDCP2_CHECK_PERIOD_MS); 2102 else if (!intel_hdcp_check_link(connector)) 2103 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2104 DRM_HDCP_CHECK_PERIOD_MS); 2105 } 2106 2107 static int i915_hdcp_component_bind(struct device *i915_kdev, 2108 struct device *mei_kdev, void *data) 2109 { 2110 struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); 2111 2112 drm_dbg(&i915->drm, "I915 HDCP comp bind\n"); 2113 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2114 i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data; 2115 i915->display.hdcp.arbiter->hdcp_dev = mei_kdev; 2116 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2117 2118 return 0; 2119 } 2120 2121 static void i915_hdcp_component_unbind(struct device *i915_kdev, 2122 struct device *mei_kdev, void *data) 2123 { 2124 struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); 2125 2126 drm_dbg(&i915->drm, "I915 HDCP comp unbind\n"); 2127 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2128 i915->display.hdcp.arbiter = NULL; 2129 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2130 } 2131 2132 static const struct component_ops i915_hdcp_ops = { 2133 .bind = i915_hdcp_component_bind, 2134 .unbind = i915_hdcp_component_unbind, 2135 }; 2136 2137 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) 2138 { 2139 switch (port) { 2140 case PORT_A: 2141 return HDCP_DDI_A; 2142 case PORT_B ... PORT_F: 2143 return (enum hdcp_ddi)port; 2144 default: 2145 return HDCP_DDI_INVALID_PORT; 2146 } 2147 } 2148 2149 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) 2150 { 2151 switch (cpu_transcoder) { 2152 case TRANSCODER_A ... TRANSCODER_D: 2153 return (enum hdcp_transcoder)(cpu_transcoder | 0x10); 2154 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 2155 return HDCP_INVALID_TRANSCODER; 2156 } 2157 } 2158 2159 static int initialize_hdcp_port_data(struct intel_connector *connector, 2160 struct intel_digital_port *dig_port, 2161 const struct intel_hdcp_shim *shim) 2162 { 2163 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2164 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 2165 enum port port = dig_port->base.port; 2166 2167 if (DISPLAY_VER(i915) < 12) 2168 data->hdcp_ddi = intel_get_hdcp_ddi_index(port); 2169 else 2170 /* 2171 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled 2172 * with zero(INVALID PORT index). 2173 */ 2174 data->hdcp_ddi = HDCP_DDI_INVALID_PORT; 2175 2176 /* 2177 * As associated transcoder is set and modified at modeset, here hdcp_transcoder 2178 * is initialized to zero (invalid transcoder index). This will be 2179 * retained for <Gen12 forever. 2180 */ 2181 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; 2182 2183 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 2184 data->protocol = (u8)shim->protocol; 2185 2186 if (!data->streams) 2187 data->streams = kcalloc(INTEL_NUM_PIPES(i915), 2188 sizeof(struct hdcp2_streamid_type), 2189 GFP_KERNEL); 2190 if (!data->streams) { 2191 drm_err(&i915->drm, "Out of Memory\n"); 2192 return -ENOMEM; 2193 } 2194 2195 return 0; 2196 } 2197 2198 static bool is_hdcp2_supported(struct drm_i915_private *i915) 2199 { 2200 if (intel_hdcp_gsc_cs_required(i915)) 2201 return true; 2202 2203 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 2204 return false; 2205 2206 return (DISPLAY_VER(i915) >= 10 || 2207 IS_KABYLAKE(i915) || 2208 IS_COFFEELAKE(i915) || 2209 IS_COMETLAKE(i915)); 2210 } 2211 2212 void intel_hdcp_component_init(struct drm_i915_private *i915) 2213 { 2214 int ret; 2215 2216 if (!is_hdcp2_supported(i915)) 2217 return; 2218 2219 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2220 drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added); 2221 2222 i915->display.hdcp.comp_added = true; 2223 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2224 if (intel_hdcp_gsc_cs_required(i915)) 2225 ret = intel_hdcp_gsc_init(i915); 2226 else 2227 ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops, 2228 I915_COMPONENT_HDCP); 2229 2230 if (ret < 0) { 2231 drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n", 2232 ret); 2233 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2234 i915->display.hdcp.comp_added = false; 2235 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2236 return; 2237 } 2238 } 2239 2240 static void intel_hdcp2_init(struct intel_connector *connector, 2241 struct intel_digital_port *dig_port, 2242 const struct intel_hdcp_shim *shim) 2243 { 2244 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2245 struct intel_hdcp *hdcp = &connector->hdcp; 2246 int ret; 2247 2248 ret = initialize_hdcp_port_data(connector, dig_port, shim); 2249 if (ret) { 2250 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n"); 2251 return; 2252 } 2253 2254 hdcp->hdcp2_supported = true; 2255 } 2256 2257 int intel_hdcp_init(struct intel_connector *connector, 2258 struct intel_digital_port *dig_port, 2259 const struct intel_hdcp_shim *shim) 2260 { 2261 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2262 struct intel_hdcp *hdcp = &connector->hdcp; 2263 int ret; 2264 2265 if (!shim) 2266 return -EINVAL; 2267 2268 if (is_hdcp2_supported(i915)) 2269 intel_hdcp2_init(connector, dig_port, shim); 2270 2271 ret = 2272 drm_connector_attach_content_protection_property(&connector->base, 2273 hdcp->hdcp2_supported); 2274 if (ret) { 2275 hdcp->hdcp2_supported = false; 2276 kfree(dig_port->hdcp_port_data.streams); 2277 return ret; 2278 } 2279 2280 hdcp->shim = shim; 2281 mutex_init(&hdcp->mutex); 2282 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 2283 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 2284 init_waitqueue_head(&hdcp->cp_irq_queue); 2285 2286 return 0; 2287 } 2288 2289 static int 2290 intel_hdcp_set_streams(struct intel_digital_port *dig_port, 2291 struct intel_atomic_state *state) 2292 { 2293 struct drm_connector_list_iter conn_iter; 2294 struct intel_digital_port *conn_dig_port; 2295 struct intel_connector *connector; 2296 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 2297 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 2298 2299 if (!intel_encoder_is_mst(&dig_port->base)) { 2300 data->k = 1; 2301 data->streams[0].stream_id = 0; 2302 return 0; 2303 } 2304 2305 data->k = 0; 2306 2307 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 2308 for_each_intel_connector_iter(connector, &conn_iter) { 2309 if (connector->base.status == connector_status_disconnected) 2310 continue; 2311 2312 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) 2313 continue; 2314 2315 conn_dig_port = intel_attached_dig_port(connector); 2316 if (conn_dig_port != dig_port) 2317 continue; 2318 2319 data->streams[data->k].stream_id = 2320 intel_conn_to_vcpi(&state->base, connector); 2321 data->k++; 2322 2323 /* if there is only one active stream */ 2324 if (dig_port->dp.active_mst_links <= 1) 2325 break; 2326 } 2327 drm_connector_list_iter_end(&conn_iter); 2328 2329 if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) 2330 return -EINVAL; 2331 2332 return 0; 2333 } 2334 2335 int intel_hdcp_enable(struct intel_atomic_state *state, 2336 struct intel_encoder *encoder, 2337 const struct intel_crtc_state *pipe_config, 2338 const struct drm_connector_state *conn_state) 2339 { 2340 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2341 struct intel_connector *connector = 2342 to_intel_connector(conn_state->connector); 2343 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2344 struct intel_hdcp *hdcp = &connector->hdcp; 2345 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 2346 int ret = -EINVAL; 2347 2348 if (!hdcp->shim) 2349 return -ENOENT; 2350 2351 if (!connector->encoder) { 2352 drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n", 2353 connector->base.name, connector->base.base.id); 2354 return -ENODEV; 2355 } 2356 2357 mutex_lock(&hdcp->mutex); 2358 mutex_lock(&dig_port->hdcp_mutex); 2359 drm_WARN_ON(&i915->drm, 2360 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 2361 hdcp->content_type = (u8)conn_state->hdcp_content_type; 2362 2363 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { 2364 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; 2365 hdcp->stream_transcoder = pipe_config->cpu_transcoder; 2366 } else { 2367 hdcp->cpu_transcoder = pipe_config->cpu_transcoder; 2368 hdcp->stream_transcoder = INVALID_TRANSCODER; 2369 } 2370 2371 if (DISPLAY_VER(i915) >= 12) 2372 dig_port->hdcp_port_data.hdcp_transcoder = 2373 intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2374 2375 /* 2376 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 2377 * is capable of HDCP2.2, it is preferred to use HDCP2.2. 2378 */ 2379 if (intel_hdcp2_capable(connector)) { 2380 ret = intel_hdcp_set_streams(dig_port, state); 2381 if (!ret) { 2382 ret = _intel_hdcp2_enable(connector); 2383 if (!ret) 2384 check_link_interval = 2385 DRM_HDCP2_CHECK_PERIOD_MS; 2386 } else { 2387 drm_dbg_kms(&i915->drm, 2388 "Set content streams failed: (%d)\n", 2389 ret); 2390 } 2391 } 2392 2393 /* 2394 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 2395 * be attempted. 2396 */ 2397 if (ret && intel_hdcp_capable(connector) && 2398 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2399 ret = _intel_hdcp_enable(connector); 2400 } 2401 2402 if (!ret) { 2403 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2404 check_link_interval); 2405 intel_hdcp_update_value(connector, 2406 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2407 true); 2408 } 2409 2410 mutex_unlock(&dig_port->hdcp_mutex); 2411 mutex_unlock(&hdcp->mutex); 2412 return ret; 2413 } 2414 2415 int intel_hdcp_disable(struct intel_connector *connector) 2416 { 2417 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2418 struct intel_hdcp *hdcp = &connector->hdcp; 2419 int ret = 0; 2420 2421 if (!hdcp->shim) 2422 return -ENOENT; 2423 2424 mutex_lock(&hdcp->mutex); 2425 mutex_lock(&dig_port->hdcp_mutex); 2426 2427 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2428 goto out; 2429 2430 intel_hdcp_update_value(connector, 2431 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); 2432 if (hdcp->hdcp2_encrypted) 2433 ret = _intel_hdcp2_disable(connector, false); 2434 else if (hdcp->hdcp_encrypted) 2435 ret = _intel_hdcp_disable(connector); 2436 2437 out: 2438 mutex_unlock(&dig_port->hdcp_mutex); 2439 mutex_unlock(&hdcp->mutex); 2440 cancel_delayed_work_sync(&hdcp->check_work); 2441 return ret; 2442 } 2443 2444 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 2445 struct intel_encoder *encoder, 2446 const struct intel_crtc_state *crtc_state, 2447 const struct drm_connector_state *conn_state) 2448 { 2449 struct intel_connector *connector = 2450 to_intel_connector(conn_state->connector); 2451 struct intel_hdcp *hdcp = &connector->hdcp; 2452 bool content_protection_type_changed, desired_and_not_enabled = false; 2453 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2454 2455 if (!connector->hdcp.shim) 2456 return; 2457 2458 content_protection_type_changed = 2459 (conn_state->hdcp_content_type != hdcp->content_type && 2460 conn_state->content_protection != 2461 DRM_MODE_CONTENT_PROTECTION_UNDESIRED); 2462 2463 /* 2464 * During the HDCP encryption session if Type change is requested, 2465 * disable the HDCP and reenable it with new TYPE value. 2466 */ 2467 if (conn_state->content_protection == 2468 DRM_MODE_CONTENT_PROTECTION_UNDESIRED || 2469 content_protection_type_changed) 2470 intel_hdcp_disable(connector); 2471 2472 /* 2473 * Mark the hdcp state as DESIRED after the hdcp disable of type 2474 * change procedure. 2475 */ 2476 if (content_protection_type_changed) { 2477 mutex_lock(&hdcp->mutex); 2478 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2479 drm_connector_get(&connector->base); 2480 queue_work(i915->unordered_wq, &hdcp->prop_work); 2481 mutex_unlock(&hdcp->mutex); 2482 } 2483 2484 if (conn_state->content_protection == 2485 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2486 mutex_lock(&hdcp->mutex); 2487 /* Avoid enabling hdcp, if it already ENABLED */ 2488 desired_and_not_enabled = 2489 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; 2490 mutex_unlock(&hdcp->mutex); 2491 /* 2492 * If HDCP already ENABLED and CP property is DESIRED, schedule 2493 * prop_work to update correct CP property to user space. 2494 */ 2495 if (!desired_and_not_enabled && !content_protection_type_changed) { 2496 drm_connector_get(&connector->base); 2497 queue_work(i915->unordered_wq, &hdcp->prop_work); 2498 } 2499 } 2500 2501 if (desired_and_not_enabled || content_protection_type_changed) 2502 intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2503 } 2504 2505 void intel_hdcp_component_fini(struct drm_i915_private *i915) 2506 { 2507 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2508 if (!i915->display.hdcp.comp_added) { 2509 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2510 return; 2511 } 2512 2513 i915->display.hdcp.comp_added = false; 2514 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2515 2516 if (intel_hdcp_gsc_cs_required(i915)) 2517 intel_hdcp_gsc_fini(i915); 2518 else 2519 component_del(i915->drm.dev, &i915_hdcp_ops); 2520 } 2521 2522 void intel_hdcp_cleanup(struct intel_connector *connector) 2523 { 2524 struct intel_hdcp *hdcp = &connector->hdcp; 2525 2526 if (!hdcp->shim) 2527 return; 2528 2529 /* 2530 * If the connector is registered, it's possible userspace could kick 2531 * off another HDCP enable, which would re-spawn the workers. 2532 */ 2533 drm_WARN_ON(connector->base.dev, 2534 connector->base.registration_state == DRM_CONNECTOR_REGISTERED); 2535 2536 /* 2537 * Now that the connector is not registered, check_work won't be run, 2538 * but cancel any outstanding instances of it 2539 */ 2540 cancel_delayed_work_sync(&hdcp->check_work); 2541 2542 /* 2543 * We don't cancel prop_work in the same way as check_work since it 2544 * requires connection_mutex which could be held while calling this 2545 * function. Instead, we rely on the connector references grabbed before 2546 * scheduling prop_work to ensure the connector is alive when prop_work 2547 * is run. So if we're in the destroy path (which is where this 2548 * function should be called), we're "guaranteed" that prop_work is not 2549 * active (tl;dr This Should Never Happen). 2550 */ 2551 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); 2552 2553 mutex_lock(&hdcp->mutex); 2554 hdcp->shim = NULL; 2555 mutex_unlock(&hdcp->mutex); 2556 } 2557 2558 void intel_hdcp_atomic_check(struct drm_connector *connector, 2559 struct drm_connector_state *old_state, 2560 struct drm_connector_state *new_state) 2561 { 2562 u64 old_cp = old_state->content_protection; 2563 u64 new_cp = new_state->content_protection; 2564 struct drm_crtc_state *crtc_state; 2565 2566 if (!new_state->crtc) { 2567 /* 2568 * If the connector is being disabled with CP enabled, mark it 2569 * desired so it's re-enabled when the connector is brought back 2570 */ 2571 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2572 new_state->content_protection = 2573 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2574 return; 2575 } 2576 2577 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 2578 new_state->crtc); 2579 /* 2580 * Fix the HDCP uapi content protection state in case of modeset. 2581 * FIXME: As per HDCP content protection property uapi doc, an uevent() 2582 * need to be sent if there is transition from ENABLED->DESIRED. 2583 */ 2584 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2585 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && 2586 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2587 new_state->content_protection = 2588 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2589 2590 /* 2591 * Nothing to do if the state didn't change, or HDCP was activated since 2592 * the last commit. And also no change in hdcp content type. 2593 */ 2594 if (old_cp == new_cp || 2595 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 2596 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 2597 if (old_state->hdcp_content_type == 2598 new_state->hdcp_content_type) 2599 return; 2600 } 2601 2602 crtc_state->mode_changed = true; 2603 } 2604 2605 /* Handles the CP_IRQ raised from the DP HDCP sink */ 2606 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 2607 { 2608 struct intel_hdcp *hdcp = &connector->hdcp; 2609 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2610 2611 if (!hdcp->shim) 2612 return; 2613 2614 atomic_inc(&connector->hdcp.cp_irq_count); 2615 wake_up_all(&connector->hdcp.cp_irq_queue); 2616 2617 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); 2618 } 2619