/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 * Ramalingam C <ramalingam.c@intel.com>
 */

#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>

#include <drm/display/drm_hdcp_helper.h>
#include <drm/i915_component.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_hdcp_gsc.h"
#include "intel_hdcp_regs.h"
#include "intel_pcode.h"

#define KEY_LOAD_TRIES	5
#define HDCP2_LC_RETRY_CNT	3

/*
 * Look up the DP MST VCPI assigned to @connector in @state.
 *
 * Returns 0 for HDMI and DP SST (no MST port). Also falls back to 0 when no
 * MST payload is found for the port or the payload carries a negative VCPI
 * (both cases WARN).
 */
static int intel_conn_to_vcpi(struct drm_atomic_state *state,
			      struct intel_connector *connector)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_atomic_payload *payload;
	struct drm_dp_mst_topology_state *mst_state;
	int vcpi = 0;

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	if (!connector->port)
		return 0;
	mgr = connector->port->mgr;

	drm_modeset_lock(&mgr->base.lock, state->acquire_ctx);
	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
	if (drm_WARN_ON(mgr->dev, !payload))
		goto out;

	vcpi = payload->vcpi;
	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
		vcpi = 0;
		goto out;
	}
out:
	return vcpi;
}

/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in DP MST topology because security f/w doesn't
 * have any provision to mark content_type for each stream separately, it marks
 * all available streams with the content_type provided at the time of port
 * authentication. This may prohibit the userspace to use type1 content on
 * HDCP 2.2 capable sink because of other sink are not capable of HDCP 2.2 in
 * DP MST topology. Though it is not compulsory, security fw should change its
 * policy to mark different content_types for different streams.
 */
static void
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
{
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	/* Port already authenticated: the common type is already committed. */
	if (dig_port->hdcp_auth_status)
		return;

	/* Any non-type1-capable sink in the topology drags everyone to type0. */
	if (!dig_port->hdcp_mst_type1_capable)
		enforce_type0 = true;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
}

/*
 * Fill in per-stream content_type in the port data before authentication:
 * SST uses the connector's own requested type, MST applies the common type
 * across the whole topology.
 */
static void intel_hdcp_prepare_streams(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
		data->streams[0].stream_type = hdcp->content_type;
	} else {
		intel_hdcp_required_content_stream(dig_port);
	}
}

static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;
	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;

	return true;
}

/*
 * Read Bksv from the sink via @shim into @bksv, retrying once if the value
 * fails the 20-ones/20-zeros validity check. Returns 0 on success, the
 * shim's error on read failure, or -ENODEV when no valid Bksv was obtained.
 */
static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
			       const struct intel_hdcp_shim *shim, u8 *bksv)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	int ret, i, tries = 2;

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
128 ret = shim->read_bksv(dig_port, bksv); 129 if (ret) 130 return ret; 131 if (intel_hdcp_is_ksv_valid(bksv)) 132 break; 133 } 134 if (i == tries) { 135 drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); 136 return -ENODEV; 137 } 138 139 return 0; 140 } 141 142 /* Is HDCP1.4 capable on Platform and Sink */ 143 bool intel_hdcp_capable(struct intel_connector *connector) 144 { 145 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 146 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 147 bool capable = false; 148 u8 bksv[5]; 149 150 if (!shim) 151 return capable; 152 153 if (shim->hdcp_capable) { 154 shim->hdcp_capable(dig_port, &capable); 155 } else { 156 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) 157 capable = true; 158 } 159 160 return capable; 161 } 162 163 /* Is HDCP2.2 capable on Platform and Sink */ 164 bool intel_hdcp2_capable(struct intel_connector *connector) 165 { 166 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 167 struct drm_i915_private *i915 = to_i915(connector->base.dev); 168 struct intel_hdcp *hdcp = &connector->hdcp; 169 bool capable = false; 170 171 /* I915 support for HDCP2.2 */ 172 if (!hdcp->hdcp2_supported) 173 return false; 174 175 /* If MTL+ make sure gsc is loaded and proxy is setup */ 176 if (intel_hdcp_gsc_cs_required(i915)) { 177 struct intel_gt *gt = i915->media_gt; 178 struct intel_gsc_uc *gsc = gt ? 
>->uc.gsc : NULL; 179 180 if (!gsc || !intel_uc_fw_is_running(&gsc->fw)) { 181 drm_dbg_kms(&i915->drm, 182 "GSC components required for HDCP2.2 are not ready\n"); 183 return false; 184 } 185 } 186 187 /* MEI/GSC interface is solid depending on which is used */ 188 mutex_lock(&i915->display.hdcp.hdcp_mutex); 189 if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) { 190 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 191 return false; 192 } 193 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 194 195 /* Sink's capability for HDCP2.2 */ 196 hdcp->shim->hdcp_2_2_capable(dig_port, &capable); 197 198 return capable; 199 } 200 201 static bool intel_hdcp_in_use(struct drm_i915_private *i915, 202 enum transcoder cpu_transcoder, enum port port) 203 { 204 return intel_de_read(i915, 205 HDCP_STATUS(i915, cpu_transcoder, port)) & 206 HDCP_STATUS_ENC; 207 } 208 209 static bool intel_hdcp2_in_use(struct drm_i915_private *i915, 210 enum transcoder cpu_transcoder, enum port port) 211 { 212 return intel_de_read(i915, 213 HDCP2_STATUS(i915, cpu_transcoder, port)) & 214 LINK_ENCRYPTION_STATUS; 215 } 216 217 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, 218 const struct intel_hdcp_shim *shim) 219 { 220 int ret, read_ret; 221 bool ksv_ready; 222 223 /* Poll for ksv list ready (spec says max time allowed is 5s) */ 224 ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, 225 &ksv_ready), 226 read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 227 100 * 1000); 228 if (ret) 229 return ret; 230 if (read_ret) 231 return read_ret; 232 if (!ksv_ready) 233 return -ETIMEDOUT; 234 235 return 0; 236 } 237 238 static bool hdcp_key_loadable(struct drm_i915_private *i915) 239 { 240 enum i915_power_well_id id; 241 intel_wakeref_t wakeref; 242 bool enabled = false; 243 244 /* 245 * On HSW and BDW, Display HW loads the Key as soon as Display resumes. 246 * On all BXT+, SW can load the keys only when the PW#1 is turned on. 
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	/* PG1 (power well #1) needs to be enabled */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		enabled = intel_display_power_well_is_enabled(i915, id);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}

/* Trigger a key clear and reset all HDCP_KEY_STATUS sticky bits. */
static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
{
	intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	intel_de_write(i915, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}

/*
 * Load the HDCP1.4 keys into display HW, unless HDCP_KEY_STATUS already
 * reports them loaded. Returns 0 on success, -ENXIO when the HW reports a
 * bad key-load state, or a pcode/wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *i915)
{
	int ret;
	u32 val;

	/* Nothing to do if keys are already loaded and valid. */
	val = intel_de_read(i915, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
		ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}

/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
{
	intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * Pick the REP_PRESENT | SHA1_M0 field for the HDCP repeater control
 * register: keyed by transcoder on display version 12+, by DDI port on
 * older platforms. Returns -EINVAL (as u32) for an unknown selector.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(i915) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&i915->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return
HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; 368 case PORT_D: 369 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; 370 case PORT_E: 371 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; 372 default: 373 drm_err(&i915->drm, "Unknown port %d\n", port); 374 return -EINVAL; 375 } 376 } 377 378 static 379 int intel_hdcp_validate_v_prime(struct intel_connector *connector, 380 const struct intel_hdcp_shim *shim, 381 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 382 { 383 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 384 struct drm_i915_private *i915 = to_i915(connector->base.dev); 385 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 386 enum port port = dig_port->base.port; 387 u32 vprime, sha_text, sha_leftovers, rep_ctl; 388 int ret, i, j, sha_idx; 389 390 /* Process V' values from the receiver */ 391 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { 392 ret = shim->read_v_prime_part(dig_port, i, &vprime); 393 if (ret) 394 return ret; 395 intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime); 396 } 397 398 /* 399 * We need to write the concatenation of all device KSVs, BINFO (DP) || 400 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte 401 * stream is written via the HDCP_SHA_TEXT register in 32-bit 402 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This 403 * index will keep track of our progress through the 64 bytes as well as 404 * helping us work the 40-bit KSVs through our 32-bit register. 
405 * 406 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian 407 */ 408 sha_idx = 0; 409 sha_text = 0; 410 sha_leftovers = 0; 411 rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port); 412 intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 413 for (i = 0; i < num_downstream; i++) { 414 unsigned int sha_empty; 415 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; 416 417 /* Fill up the empty slots in sha_text and write it out */ 418 sha_empty = sizeof(sha_text) - sha_leftovers; 419 for (j = 0; j < sha_empty; j++) { 420 u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8); 421 sha_text |= ksv[j] << off; 422 } 423 424 ret = intel_write_sha_text(i915, sha_text); 425 if (ret < 0) 426 return ret; 427 428 /* Programming guide writes this every 64 bytes */ 429 sha_idx += sizeof(sha_text); 430 if (!(sha_idx % 64)) 431 intel_de_write(i915, HDCP_REP_CTL, 432 rep_ctl | HDCP_SHA1_TEXT_32); 433 434 /* Store the leftover bytes from the ksv in sha_text */ 435 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; 436 sha_text = 0; 437 for (j = 0; j < sha_leftovers; j++) 438 sha_text |= ksv[sha_empty + j] << 439 ((sizeof(sha_text) - j - 1) * 8); 440 441 /* 442 * If we still have room in sha_text for more data, continue. 443 * Otherwise, write it out immediately. 444 */ 445 if (sizeof(sha_text) > sha_leftovers) 446 continue; 447 448 ret = intel_write_sha_text(i915, sha_text); 449 if (ret < 0) 450 return ret; 451 sha_leftovers = 0; 452 sha_text = 0; 453 sha_idx += sizeof(sha_text); 454 } 455 456 /* 457 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many 458 * bytes are leftover from the last ksv, we might be able to fit them 459 * all in sha_text (first 2 cases), or we might need to split them up 460 * into 2 writes (last 2 cases). 
461 */ 462 if (sha_leftovers == 0) { 463 /* Write 16 bits of text, 16 bits of M0 */ 464 intel_de_write(i915, HDCP_REP_CTL, 465 rep_ctl | HDCP_SHA1_TEXT_16); 466 ret = intel_write_sha_text(i915, 467 bstatus[0] << 8 | bstatus[1]); 468 if (ret < 0) 469 return ret; 470 sha_idx += sizeof(sha_text); 471 472 /* Write 32 bits of M0 */ 473 intel_de_write(i915, HDCP_REP_CTL, 474 rep_ctl | HDCP_SHA1_TEXT_0); 475 ret = intel_write_sha_text(i915, 0); 476 if (ret < 0) 477 return ret; 478 sha_idx += sizeof(sha_text); 479 480 /* Write 16 bits of M0 */ 481 intel_de_write(i915, HDCP_REP_CTL, 482 rep_ctl | HDCP_SHA1_TEXT_16); 483 ret = intel_write_sha_text(i915, 0); 484 if (ret < 0) 485 return ret; 486 sha_idx += sizeof(sha_text); 487 488 } else if (sha_leftovers == 1) { 489 /* Write 24 bits of text, 8 bits of M0 */ 490 intel_de_write(i915, HDCP_REP_CTL, 491 rep_ctl | HDCP_SHA1_TEXT_24); 492 sha_text |= bstatus[0] << 16 | bstatus[1] << 8; 493 /* Only 24-bits of data, must be in the LSB */ 494 sha_text = (sha_text & 0xffffff00) >> 8; 495 ret = intel_write_sha_text(i915, sha_text); 496 if (ret < 0) 497 return ret; 498 sha_idx += sizeof(sha_text); 499 500 /* Write 32 bits of M0 */ 501 intel_de_write(i915, HDCP_REP_CTL, 502 rep_ctl | HDCP_SHA1_TEXT_0); 503 ret = intel_write_sha_text(i915, 0); 504 if (ret < 0) 505 return ret; 506 sha_idx += sizeof(sha_text); 507 508 /* Write 24 bits of M0 */ 509 intel_de_write(i915, HDCP_REP_CTL, 510 rep_ctl | HDCP_SHA1_TEXT_8); 511 ret = intel_write_sha_text(i915, 0); 512 if (ret < 0) 513 return ret; 514 sha_idx += sizeof(sha_text); 515 516 } else if (sha_leftovers == 2) { 517 /* Write 32 bits of text */ 518 intel_de_write(i915, HDCP_REP_CTL, 519 rep_ctl | HDCP_SHA1_TEXT_32); 520 sha_text |= bstatus[0] << 8 | bstatus[1]; 521 ret = intel_write_sha_text(i915, sha_text); 522 if (ret < 0) 523 return ret; 524 sha_idx += sizeof(sha_text); 525 526 /* Write 64 bits of M0 */ 527 intel_de_write(i915, HDCP_REP_CTL, 528 rep_ctl | HDCP_SHA1_TEXT_0); 529 for (i = 0; i 
< 2; i++) { 530 ret = intel_write_sha_text(i915, 0); 531 if (ret < 0) 532 return ret; 533 sha_idx += sizeof(sha_text); 534 } 535 536 /* 537 * Terminate the SHA-1 stream by hand. For the other leftover 538 * cases this is appended by the hardware. 539 */ 540 intel_de_write(i915, HDCP_REP_CTL, 541 rep_ctl | HDCP_SHA1_TEXT_32); 542 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24; 543 ret = intel_write_sha_text(i915, sha_text); 544 if (ret < 0) 545 return ret; 546 sha_idx += sizeof(sha_text); 547 } else if (sha_leftovers == 3) { 548 /* Write 32 bits of text (filled from LSB) */ 549 intel_de_write(i915, HDCP_REP_CTL, 550 rep_ctl | HDCP_SHA1_TEXT_32); 551 sha_text |= bstatus[0]; 552 ret = intel_write_sha_text(i915, sha_text); 553 if (ret < 0) 554 return ret; 555 sha_idx += sizeof(sha_text); 556 557 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */ 558 intel_de_write(i915, HDCP_REP_CTL, 559 rep_ctl | HDCP_SHA1_TEXT_8); 560 ret = intel_write_sha_text(i915, bstatus[1]); 561 if (ret < 0) 562 return ret; 563 sha_idx += sizeof(sha_text); 564 565 /* Write 32 bits of M0 */ 566 intel_de_write(i915, HDCP_REP_CTL, 567 rep_ctl | HDCP_SHA1_TEXT_0); 568 ret = intel_write_sha_text(i915, 0); 569 if (ret < 0) 570 return ret; 571 sha_idx += sizeof(sha_text); 572 573 /* Write 8 bits of M0 */ 574 intel_de_write(i915, HDCP_REP_CTL, 575 rep_ctl | HDCP_SHA1_TEXT_24); 576 ret = intel_write_sha_text(i915, 0); 577 if (ret < 0) 578 return ret; 579 sha_idx += sizeof(sha_text); 580 } else { 581 drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n", 582 sha_leftovers); 583 return -EINVAL; 584 } 585 586 intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 587 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ 588 while ((sha_idx % 64) < (64 - sizeof(sha_text))) { 589 ret = intel_write_sha_text(i915, 0); 590 if (ret < 0) 591 return ret; 592 sha_idx += sizeof(sha_text); 593 } 594 595 /* 596 * Last write gets the length of the concatenation in bits. 
That is: 597 * - 5 bytes per device 598 * - 10 bytes for BINFO/BSTATUS(2), M0(8) 599 */ 600 sha_text = (num_downstream * 5 + 10) * 8; 601 ret = intel_write_sha_text(i915, sha_text); 602 if (ret < 0) 603 return ret; 604 605 /* Tell the HW we're done with the hash and wait for it to ACK */ 606 intel_de_write(i915, HDCP_REP_CTL, 607 rep_ctl | HDCP_SHA1_COMPLETE_HASH); 608 if (intel_de_wait_for_set(i915, HDCP_REP_CTL, 609 HDCP_SHA1_COMPLETE, 1)) { 610 drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n"); 611 return -ETIMEDOUT; 612 } 613 if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 614 drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n"); 615 return -ENXIO; 616 } 617 618 return 0; 619 } 620 621 /* Implements Part 2 of the HDCP authorization procedure */ 622 static 623 int intel_hdcp_auth_downstream(struct intel_connector *connector) 624 { 625 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 626 struct drm_i915_private *i915 = to_i915(connector->base.dev); 627 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 628 u8 bstatus[2], num_downstream, *ksv_fifo; 629 int ret, i, tries = 3; 630 631 ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); 632 if (ret) { 633 drm_dbg_kms(&i915->drm, 634 "KSV list failed to become ready (%d)\n", ret); 635 return ret; 636 } 637 638 ret = shim->read_bstatus(dig_port, bstatus); 639 if (ret) 640 return ret; 641 642 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 643 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 644 drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n"); 645 return -EPERM; 646 } 647 648 /* 649 * When repeater reports 0 device count, HDCP1.4 spec allows disabling 650 * the HDCP encryption. That implies that repeater can't have its own 651 * display. As there is no consumption of encrypted content in the 652 * repeater with 0 downstream devices, we are failing the 653 * authentication. 
654 */ 655 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); 656 if (num_downstream == 0) { 657 drm_dbg_kms(&i915->drm, 658 "Repeater with zero downstream devices\n"); 659 return -EINVAL; 660 } 661 662 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); 663 if (!ksv_fifo) { 664 drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n"); 665 return -ENOMEM; 666 } 667 668 ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo); 669 if (ret) 670 goto err; 671 672 if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo, 673 num_downstream) > 0) { 674 drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n"); 675 ret = -EPERM; 676 goto err; 677 } 678 679 /* 680 * When V prime mismatches, DP Spec mandates re-read of 681 * V prime atleast twice. 682 */ 683 for (i = 0; i < tries; i++) { 684 ret = intel_hdcp_validate_v_prime(connector, shim, 685 ksv_fifo, num_downstream, 686 bstatus); 687 if (!ret) 688 break; 689 } 690 691 if (i == tries) { 692 drm_dbg_kms(&i915->drm, 693 "V Prime validation failed.(%d)\n", ret); 694 goto err; 695 } 696 697 drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n", 698 num_downstream); 699 ret = 0; 700 err: 701 kfree(ksv_fifo); 702 return ret; 703 } 704 705 /* Implements Part 1 of the HDCP authorization procedure */ 706 static int intel_hdcp_auth(struct intel_connector *connector) 707 { 708 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 709 struct drm_i915_private *i915 = to_i915(connector->base.dev); 710 struct intel_hdcp *hdcp = &connector->hdcp; 711 const struct intel_hdcp_shim *shim = hdcp->shim; 712 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 713 enum port port = dig_port->base.port; 714 unsigned long r0_prime_gen_start; 715 int ret, i, tries = 2; 716 union { 717 u32 reg[2]; 718 u8 shim[DRM_HDCP_AN_LEN]; 719 } an; 720 union { 721 u32 reg[2]; 722 u8 shim[DRM_HDCP_KSV_LEN]; 723 } bksv; 724 union { 725 u32 reg; 726 u8 shim[DRM_HDCP_RI_LEN]; 727 } ri; 728 bool 
repeater_present, hdcp_capable; 729 730 /* 731 * Detects whether the display is HDCP capable. Although we check for 732 * valid Bksv below, the HDCP over DP spec requires that we check 733 * whether the display supports HDCP before we write An. For HDMI 734 * displays, this is not necessary. 735 */ 736 if (shim->hdcp_capable) { 737 ret = shim->hdcp_capable(dig_port, &hdcp_capable); 738 if (ret) 739 return ret; 740 if (!hdcp_capable) { 741 drm_dbg_kms(&i915->drm, 742 "Panel is not HDCP capable\n"); 743 return -EINVAL; 744 } 745 } 746 747 /* Initialize An with 2 random values and acquire it */ 748 for (i = 0; i < 2; i++) 749 intel_de_write(i915, 750 HDCP_ANINIT(i915, cpu_transcoder, port), 751 get_random_u32()); 752 intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 753 HDCP_CONF_CAPTURE_AN); 754 755 /* Wait for An to be acquired */ 756 if (intel_de_wait_for_set(i915, 757 HDCP_STATUS(i915, cpu_transcoder, port), 758 HDCP_STATUS_AN_READY, 1)) { 759 drm_err(&i915->drm, "Timed out waiting for An\n"); 760 return -ETIMEDOUT; 761 } 762 763 an.reg[0] = intel_de_read(i915, 764 HDCP_ANLO(i915, cpu_transcoder, port)); 765 an.reg[1] = intel_de_read(i915, 766 HDCP_ANHI(i915, cpu_transcoder, port)); 767 ret = shim->write_an_aksv(dig_port, an.shim); 768 if (ret) 769 return ret; 770 771 r0_prime_gen_start = jiffies; 772 773 memset(&bksv, 0, sizeof(bksv)); 774 775 ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim); 776 if (ret < 0) 777 return ret; 778 779 if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) { 780 drm_err(&i915->drm, "BKSV is revoked\n"); 781 return -EPERM; 782 } 783 784 intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port), 785 bksv.reg[0]); 786 intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port), 787 bksv.reg[1]); 788 789 ret = shim->repeater_present(dig_port, &repeater_present); 790 if (ret) 791 return ret; 792 if (repeater_present) 793 intel_de_write(i915, HDCP_REP_CTL, 794 intel_hdcp_get_repeater_ctl(i915, 
cpu_transcoder, port)); 795 796 ret = shim->toggle_signalling(dig_port, cpu_transcoder, true); 797 if (ret) 798 return ret; 799 800 intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 801 HDCP_CONF_AUTH_AND_ENC); 802 803 /* Wait for R0 ready */ 804 if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & 805 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { 806 drm_err(&i915->drm, "Timed out waiting for R0 ready\n"); 807 return -ETIMEDOUT; 808 } 809 810 /* 811 * Wait for R0' to become available. The spec says 100ms from Aksv, but 812 * some monitors can take longer than this. We'll set the timeout at 813 * 300ms just to be sure. 814 * 815 * On DP, there's an R0_READY bit available but no such bit 816 * exists on HDMI. Since the upper-bound is the same, we'll just do 817 * the stupid thing instead of polling on one and not the other. 818 */ 819 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); 820 821 tries = 3; 822 823 /* 824 * DP HDCP Spec mandates the two more reattempt to read R0, incase 825 * of R0 mismatch. 
826 */ 827 for (i = 0; i < tries; i++) { 828 ri.reg = 0; 829 ret = shim->read_ri_prime(dig_port, ri.shim); 830 if (ret) 831 return ret; 832 intel_de_write(i915, 833 HDCP_RPRIME(i915, cpu_transcoder, port), 834 ri.reg); 835 836 /* Wait for Ri prime match */ 837 if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) & 838 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) 839 break; 840 } 841 842 if (i == tries) { 843 drm_dbg_kms(&i915->drm, 844 "Timed out waiting for Ri prime match (%x)\n", 845 intel_de_read(i915, 846 HDCP_STATUS(i915, cpu_transcoder, port))); 847 return -ETIMEDOUT; 848 } 849 850 /* Wait for encryption confirmation */ 851 if (intel_de_wait_for_set(i915, 852 HDCP_STATUS(i915, cpu_transcoder, port), 853 HDCP_STATUS_ENC, 854 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 855 drm_err(&i915->drm, "Timed out waiting for encryption\n"); 856 return -ETIMEDOUT; 857 } 858 859 /* DP MST Auth Part 1 Step 2.a and Step 2.b */ 860 if (shim->stream_encryption) { 861 ret = shim->stream_encryption(connector, true); 862 if (ret) { 863 drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n", 864 connector->base.name, connector->base.base.id); 865 return ret; 866 } 867 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n", 868 transcoder_name(hdcp->stream_transcoder)); 869 } 870 871 if (repeater_present) 872 return intel_hdcp_auth_downstream(connector); 873 874 drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n"); 875 return 0; 876 } 877 878 static int _intel_hdcp_disable(struct intel_connector *connector) 879 { 880 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 881 struct drm_i915_private *i915 = to_i915(connector->base.dev); 882 struct intel_hdcp *hdcp = &connector->hdcp; 883 enum port port = dig_port->base.port; 884 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 885 u32 repeater_ctl; 886 int ret; 887 888 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n", 889 
connector->base.name, connector->base.base.id); 890 891 if (hdcp->shim->stream_encryption) { 892 ret = hdcp->shim->stream_encryption(connector, false); 893 if (ret) { 894 drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n", 895 connector->base.name, connector->base.base.id); 896 return ret; 897 } 898 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n", 899 transcoder_name(hdcp->stream_transcoder)); 900 /* 901 * If there are other connectors on this port using HDCP, 902 * don't disable it until it disabled HDCP encryption for 903 * all connectors in MST topology. 904 */ 905 if (dig_port->num_hdcp_streams > 0) 906 return 0; 907 } 908 909 hdcp->hdcp_encrypted = false; 910 intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0); 911 if (intel_de_wait_for_clear(i915, 912 HDCP_STATUS(i915, cpu_transcoder, port), 913 ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 914 drm_err(&i915->drm, 915 "Failed to disable HDCP, timeout clearing status\n"); 916 return -ETIMEDOUT; 917 } 918 919 repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, 920 port); 921 intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0); 922 923 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); 924 if (ret) { 925 drm_err(&i915->drm, "Failed to disable HDCP signalling\n"); 926 return ret; 927 } 928 929 drm_dbg_kms(&i915->drm, "HDCP is disabled\n"); 930 return 0; 931 } 932 933 static int _intel_hdcp_enable(struct intel_connector *connector) 934 { 935 struct drm_i915_private *i915 = to_i915(connector->base.dev); 936 struct intel_hdcp *hdcp = &connector->hdcp; 937 int i, ret, tries = 3; 938 939 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n", 940 connector->base.name, connector->base.base.id); 941 942 if (!hdcp_key_loadable(i915)) { 943 drm_err(&i915->drm, "HDCP key Load is not possible\n"); 944 return -ENXIO; 945 } 946 947 for (i = 0; i < KEY_LOAD_TRIES; i++) { 948 ret = intel_hdcp_load_keys(i915); 949 if (!ret) 950 
			break;
		intel_hdcp_clear_keys(i915);
	}
	if (ret) {
		drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&i915->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}

/* Map an embedded struct intel_hdcp back to its owning connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

/*
 * Update hdcp->value and keep the per-port HDCP stream refcount in sync;
 * optionally queue prop_work to mirror the new value onto the connector's
 * content-protection property. Caller must hold hdcp->mutex, and also
 * dig_port->hdcp_mutex whenever the value actually changes (both WARNed).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Transition away from/into ENABLED adjusts the port stream count. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference is dropped by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		queue_work(i915->unordered_wq, &hdcp->prop_work);
	}
}

/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW says encryption stopped behind our back: flag and re-arm. */
	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Shim reports the link healthy: nothing to do beyond re-asserting. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
						DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&i915->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link integrity failed: tear down and re-authenticate from scratch. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
/*
 * Property worker: pushes the current hdcp->value (ENABLED/DESIRED) out to
 * the connector's "Content Protection" property.  Queued (with a connector
 * reference held) by intel_hdcp_update_value().
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	/* Drops the reference taken when this work was queued. */
	drm_connector_put(&connector->base);
}

/*
 * HDCP is supported when the platform advertises it and, before display
 * version 12, only on ports A-D.
 */
bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
{
	return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
		(DISPLAY_VER(i915) >= 12 || port < PORT_E);
}

/*
 * The hdcp2_* helpers below proxy each HDCP 2.2 protocol step to the HDCP
 * arbiter (ME/GSC firmware backend) under display.hdcp.hdcp_mutex.  Each
 * returns -EINVAL when no arbiter is bound, otherwise the backend's result.
 */

/* Start an HDCP2.2 session: get the AKE_Init message from the firmware. */
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
		       struct hdcp2_ake_init *ake_data)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
	if (ret)
		drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/*
 * Have the firmware validate the receiver certificate and produce the
 * "no stored km" (or stored-km) message of *msg_sz bytes to send downstream.
 */
static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
							    rx_cert, paired,
							    ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Firmware check of the receiver's H' (AKE_Send_H_prime). */
static int hdcp2_verify_hprime(struct intel_connector *connector,
			       struct hdcp2_ake_send_hprime *rx_hprime)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Hand the receiver's pairing info to the firmware for storage. */
static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Get the LC_Init message for the locality check from the firmware. */
static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Firmware check of the receiver's L' (LC_Send_L_prime). */
static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Get the SKE_Send_Eks (session key) message from the firmware. */
static int hdcp2_prepare_skey(struct intel_connector *connector,
			      struct hdcp2_ske_send_eks *ske_data)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/*
 * Have the firmware validate the repeater's receiver-ID list topology and
 * produce the RepeaterAuth_Send_Ack to return downstream.
 */
static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
								*rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
							    data,
							    rep_topology,
							    rep_send_ack);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Verify rep topology failed. %d\n", ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Firmware check of the repeater's M' (RepeaterAuth_Stream_Ready). */
static int
hdcp2_verify_mprime(struct intel_connector *connector,
		    struct hdcp2_rep_stream_ready *stream_ready)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Tell the firmware authentication is complete so it enables HDCP on the port. */
static int hdcp2_authenticate_port(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
	if (ret < 0)
		drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
			    ret);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Tear down the firmware-side HDCP2.2 session for this port. */
static int hdcp2_close_session(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct i915_hdcp_arbiter *arbiter;
	int ret;

	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	arbiter = i915->display.hdcp.arbiter;

	if (!arbiter || !arbiter->ops) {
		mutex_unlock(&i915->display.hdcp.hdcp_mutex);
		return -EINVAL;
	}

	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
					       &dig_port->hdcp_port_data);
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return ret;
}

/* Deauthentication is just closing the firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_session(connector);
}

/* Authentication flow starts from here */
/*
 * AKE (Authentication and Key Exchange): AKE_Init -> AKE_Send_Cert ->
 * km transfer -> H' verification -> pairing (if the sink isn't paired yet).
 * All messages share one union since only one is live at a time.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&i915->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Refuse receivers on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&i915->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}

/*
 * Locality check: LC_Init -> LC_Send_L_prime -> L' verification, retried up
 * to HDCP2_LC_RETRY_CNT times.  Returns the result of the last attempt.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
					  sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}

/* SKE: fetch the session key message from firmware and send it to the sink. */
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp2_ske_send_eks send_eks;
	int ret;

	ret = hdcp2_prepare_skey(connector, &send_eks);
	if (ret < 0)
		return ret;

	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
					sizeof(send_eks));
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * One round of RepeaterAuth stream management: send Stream_Manage with the
 * per-stream content types, read Stream_Ready and verify M'.  seq_num_m is
 * incremented on every attempt, success or failure.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* seq_num_m must not wrap; the caller restarts auth in that case. */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Don't transmit the unused tail of the fixed-size streams array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}

/*
 * Repeater topology authentication: read the receiver-ID list, validate
 * topology limits, seq_num_v monotonicity and the SRM revocation list, then
 * return the firmware-prepared ack to the repeater.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&i915->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&i915->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&i915->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Full sink authentication: AKE -> locality check -> SKE -> optional stream
 * type config -> repeater topology auth (repeaters only).
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Turn on per-stream HDCP2.2 encryption.  Requires link encryption to be up
 * already; if it isn't, the port is deauthenticated (link_recover path).
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
			connector->base.name, connector->base.base.id);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}

/*
 * Request link encryption once the hardware reports LINK_AUTH_STATUS, then
 * wait for LINK_ENCRYPTION_STATUS to latch.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(&i915->drm,
		    intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(i915,
				    HDCP2_STATUS(i915, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}

/*
 * Drop the link encryption request, wait for the status bit to clear, and
 * turn off HDCP signalling.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(i915,
				      HDCP2_STATUS(i915, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&i915->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&i915->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}

/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info().  No-op for
 * non-repeaters; gives up early when seq_num_m wraps so auth can restart.
 */
static int
hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int i, tries = 3, ret;

	if (!connector->hdcp.is_repeater)
		return 0;

	for (i = 0; i < tries; i++) {
		ret = _hdcp2_propagate_stream_management_info(connector);
		if (!ret)
			break;

		/* Lets restart the auth incase of seq_num_m roll over */
		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
			drm_dbg_kms(&i915->drm,
				    "seq_num_m roll over.(%d)\n", ret);
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "HDCP2 stream management %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
	}

	return ret;
}

/*
 * Top-level HDCP2.2 bring-up: authenticate the sink (up to 3 tries, skipped
 * entirely when the port is already authenticated), then enable link and
 * stream encryption.  The firmware session is closed on every failed try.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			intel_hdcp_prepare_streams(connector);

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}

/* Enable HDCP2.2 on this connector and mark it encrypted on success. */
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}

/*
 * Disable HDCP2.2.  Stream encryption is dropped first; for MST, port-level
 * teardown is skipped while other streams remain, unless this is a link
 * recovery (hdcp2_link_recovery) which forces the full teardown.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}

/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Lock order: hdcp->mutex before dig_port->hdcp_mutex. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(&i915->drm,
			!intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
		drm_err(&i915->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&i915->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Re-auth only the repeater topology; full re-auth not needed. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&i915->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&i915->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Link failed or topology re-auth failed: full disable + re-enable. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&i915->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}

/*
 * Periodic link-check worker.  Tries the HDCP2.2 check first, then HDCP1.4,
 * and re-queues itself with the matching check period while either succeeds.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);

	if (drm_connector_is_unregistered(&connector->base))
		return;

	if (!intel_hdcp2_check_link(connector))
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
				   DRM_HDCP_CHECK_PERIOD_MS);
}

/* Component framework bind: record the HDCP arbiter provided by the fw driver. */
static int i915_hdcp_component_bind(struct device *i915_kdev,
				    struct device *mei_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);

	drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
	i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);

	return 0;
}

/* Component framework unbind: drop the arbiter so the hdcp2_* helpers bail out. */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);

	drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&i915->display.hdcp.hdcp_mutex);
	i915->display.hdcp.arbiter = NULL;
	mutex_unlock(&i915->display.hdcp.hdcp_mutex);
}

2135 static const struct component_ops i915_hdcp_ops = { 2136 .bind = i915_hdcp_component_bind, 2137 .unbind = i915_hdcp_component_unbind, 2138 }; 2139 2140 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) 2141 { 2142 switch (port) { 2143 case PORT_A: 2144 return HDCP_DDI_A; 2145 case PORT_B ... PORT_F: 2146 return (enum hdcp_ddi)port; 2147 default: 2148 return HDCP_DDI_INVALID_PORT; 2149 } 2150 } 2151 2152 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) 2153 { 2154 switch (cpu_transcoder) { 2155 case TRANSCODER_A ... TRANSCODER_D: 2156 return (enum hdcp_transcoder)(cpu_transcoder | 0x10); 2157 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 2158 return HDCP_INVALID_TRANSCODER; 2159 } 2160 } 2161 2162 static int initialize_hdcp_port_data(struct intel_connector *connector, 2163 struct intel_digital_port *dig_port, 2164 const struct intel_hdcp_shim *shim) 2165 { 2166 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2167 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 2168 enum port port = dig_port->base.port; 2169 2170 if (DISPLAY_VER(i915) < 12) 2171 data->hdcp_ddi = intel_get_hdcp_ddi_index(port); 2172 else 2173 /* 2174 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled 2175 * with zero(INVALID PORT index). 2176 */ 2177 data->hdcp_ddi = HDCP_DDI_INVALID_PORT; 2178 2179 /* 2180 * As associated transcoder is set and modified at modeset, here hdcp_transcoder 2181 * is initialized to zero (invalid transcoder index). This will be 2182 * retained for <Gen12 forever. 
2183 */ 2184 data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; 2185 2186 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 2187 data->protocol = (u8)shim->protocol; 2188 2189 if (!data->streams) 2190 data->streams = kcalloc(INTEL_NUM_PIPES(i915), 2191 sizeof(struct hdcp2_streamid_type), 2192 GFP_KERNEL); 2193 if (!data->streams) { 2194 drm_err(&i915->drm, "Out of Memory\n"); 2195 return -ENOMEM; 2196 } 2197 2198 return 0; 2199 } 2200 2201 static bool is_hdcp2_supported(struct drm_i915_private *i915) 2202 { 2203 if (intel_hdcp_gsc_cs_required(i915)) 2204 return true; 2205 2206 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 2207 return false; 2208 2209 return (DISPLAY_VER(i915) >= 10 || 2210 IS_KABYLAKE(i915) || 2211 IS_COFFEELAKE(i915) || 2212 IS_COMETLAKE(i915)); 2213 } 2214 2215 void intel_hdcp_component_init(struct drm_i915_private *i915) 2216 { 2217 int ret; 2218 2219 if (!is_hdcp2_supported(i915)) 2220 return; 2221 2222 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2223 drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added); 2224 2225 i915->display.hdcp.comp_added = true; 2226 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2227 if (intel_hdcp_gsc_cs_required(i915)) 2228 ret = intel_hdcp_gsc_init(i915); 2229 else 2230 ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops, 2231 I915_COMPONENT_HDCP); 2232 2233 if (ret < 0) { 2234 drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n", 2235 ret); 2236 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2237 i915->display.hdcp.comp_added = false; 2238 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2239 return; 2240 } 2241 } 2242 2243 static void intel_hdcp2_init(struct intel_connector *connector, 2244 struct intel_digital_port *dig_port, 2245 const struct intel_hdcp_shim *shim) 2246 { 2247 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2248 struct intel_hdcp *hdcp = &connector->hdcp; 2249 int ret; 2250 2251 ret = initialize_hdcp_port_data(connector, dig_port, shim); 2252 if (ret) { 2253 drm_dbg_kms(&i915->drm, "Mei 
hdcp data init failed\n"); 2254 return; 2255 } 2256 2257 hdcp->hdcp2_supported = true; 2258 } 2259 2260 int intel_hdcp_init(struct intel_connector *connector, 2261 struct intel_digital_port *dig_port, 2262 const struct intel_hdcp_shim *shim) 2263 { 2264 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2265 struct intel_hdcp *hdcp = &connector->hdcp; 2266 int ret; 2267 2268 if (!shim) 2269 return -EINVAL; 2270 2271 if (is_hdcp2_supported(i915)) 2272 intel_hdcp2_init(connector, dig_port, shim); 2273 2274 ret = 2275 drm_connector_attach_content_protection_property(&connector->base, 2276 hdcp->hdcp2_supported); 2277 if (ret) { 2278 hdcp->hdcp2_supported = false; 2279 kfree(dig_port->hdcp_port_data.streams); 2280 return ret; 2281 } 2282 2283 hdcp->shim = shim; 2284 mutex_init(&hdcp->mutex); 2285 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 2286 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 2287 init_waitqueue_head(&hdcp->cp_irq_queue); 2288 2289 return 0; 2290 } 2291 2292 static int 2293 intel_hdcp_set_streams(struct intel_digital_port *dig_port, 2294 struct intel_atomic_state *state) 2295 { 2296 struct drm_connector_list_iter conn_iter; 2297 struct intel_digital_port *conn_dig_port; 2298 struct intel_connector *connector; 2299 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 2300 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 2301 2302 if (!intel_encoder_is_mst(&dig_port->base)) { 2303 data->k = 1; 2304 data->streams[0].stream_id = 0; 2305 return 0; 2306 } 2307 2308 data->k = 0; 2309 2310 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 2311 for_each_intel_connector_iter(connector, &conn_iter) { 2312 if (connector->base.status == connector_status_disconnected) 2313 continue; 2314 2315 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) 2316 continue; 2317 2318 conn_dig_port = intel_attached_dig_port(connector); 2319 if (conn_dig_port != dig_port) 2320 continue; 2321 2322 
data->streams[data->k].stream_id = 2323 intel_conn_to_vcpi(&state->base, connector); 2324 data->k++; 2325 2326 /* if there is only one active stream */ 2327 if (dig_port->dp.active_mst_links <= 1) 2328 break; 2329 } 2330 drm_connector_list_iter_end(&conn_iter); 2331 2332 if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0)) 2333 return -EINVAL; 2334 2335 return 0; 2336 } 2337 2338 int intel_hdcp_enable(struct intel_atomic_state *state, 2339 struct intel_encoder *encoder, 2340 const struct intel_crtc_state *pipe_config, 2341 const struct drm_connector_state *conn_state) 2342 { 2343 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2344 struct intel_connector *connector = 2345 to_intel_connector(conn_state->connector); 2346 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2347 struct intel_hdcp *hdcp = &connector->hdcp; 2348 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 2349 int ret = -EINVAL; 2350 2351 if (!hdcp->shim) 2352 return -ENOENT; 2353 2354 if (!connector->encoder) { 2355 drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n", 2356 connector->base.name, connector->base.base.id); 2357 return -ENODEV; 2358 } 2359 2360 mutex_lock(&hdcp->mutex); 2361 mutex_lock(&dig_port->hdcp_mutex); 2362 drm_WARN_ON(&i915->drm, 2363 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 2364 hdcp->content_type = (u8)conn_state->hdcp_content_type; 2365 2366 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) { 2367 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder; 2368 hdcp->stream_transcoder = pipe_config->cpu_transcoder; 2369 } else { 2370 hdcp->cpu_transcoder = pipe_config->cpu_transcoder; 2371 hdcp->stream_transcoder = INVALID_TRANSCODER; 2372 } 2373 2374 if (DISPLAY_VER(i915) >= 12) 2375 dig_port->hdcp_port_data.hdcp_transcoder = 2376 intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2377 2378 /* 2379 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 2380 * is 
capable of HDCP2.2, it is preferred to use HDCP2.2. 2381 */ 2382 if (intel_hdcp2_capable(connector)) { 2383 ret = intel_hdcp_set_streams(dig_port, state); 2384 if (!ret) { 2385 ret = _intel_hdcp2_enable(connector); 2386 if (!ret) 2387 check_link_interval = 2388 DRM_HDCP2_CHECK_PERIOD_MS; 2389 } else { 2390 drm_dbg_kms(&i915->drm, 2391 "Set content streams failed: (%d)\n", 2392 ret); 2393 } 2394 } 2395 2396 /* 2397 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 2398 * be attempted. 2399 */ 2400 if (ret && intel_hdcp_capable(connector) && 2401 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2402 ret = _intel_hdcp_enable(connector); 2403 } 2404 2405 if (!ret) { 2406 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 2407 check_link_interval); 2408 intel_hdcp_update_value(connector, 2409 DRM_MODE_CONTENT_PROTECTION_ENABLED, 2410 true); 2411 } 2412 2413 mutex_unlock(&dig_port->hdcp_mutex); 2414 mutex_unlock(&hdcp->mutex); 2415 return ret; 2416 } 2417 2418 int intel_hdcp_disable(struct intel_connector *connector) 2419 { 2420 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 2421 struct intel_hdcp *hdcp = &connector->hdcp; 2422 int ret = 0; 2423 2424 if (!hdcp->shim) 2425 return -ENOENT; 2426 2427 mutex_lock(&hdcp->mutex); 2428 mutex_lock(&dig_port->hdcp_mutex); 2429 2430 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 2431 goto out; 2432 2433 intel_hdcp_update_value(connector, 2434 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false); 2435 if (hdcp->hdcp2_encrypted) 2436 ret = _intel_hdcp2_disable(connector, false); 2437 else if (hdcp->hdcp_encrypted) 2438 ret = _intel_hdcp_disable(connector); 2439 2440 out: 2441 mutex_unlock(&dig_port->hdcp_mutex); 2442 mutex_unlock(&hdcp->mutex); 2443 cancel_delayed_work_sync(&hdcp->check_work); 2444 return ret; 2445 } 2446 2447 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 2448 struct intel_encoder *encoder, 2449 const struct intel_crtc_state *crtc_state, 2450 
const struct drm_connector_state *conn_state) 2451 { 2452 struct intel_connector *connector = 2453 to_intel_connector(conn_state->connector); 2454 struct intel_hdcp *hdcp = &connector->hdcp; 2455 bool content_protection_type_changed, desired_and_not_enabled = false; 2456 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2457 2458 if (!connector->hdcp.shim) 2459 return; 2460 2461 content_protection_type_changed = 2462 (conn_state->hdcp_content_type != hdcp->content_type && 2463 conn_state->content_protection != 2464 DRM_MODE_CONTENT_PROTECTION_UNDESIRED); 2465 2466 /* 2467 * During the HDCP encryption session if Type change is requested, 2468 * disable the HDCP and reenable it with new TYPE value. 2469 */ 2470 if (conn_state->content_protection == 2471 DRM_MODE_CONTENT_PROTECTION_UNDESIRED || 2472 content_protection_type_changed) 2473 intel_hdcp_disable(connector); 2474 2475 /* 2476 * Mark the hdcp state as DESIRED after the hdcp disable of type 2477 * change procedure. 2478 */ 2479 if (content_protection_type_changed) { 2480 mutex_lock(&hdcp->mutex); 2481 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 2482 drm_connector_get(&connector->base); 2483 queue_work(i915->unordered_wq, &hdcp->prop_work); 2484 mutex_unlock(&hdcp->mutex); 2485 } 2486 2487 if (conn_state->content_protection == 2488 DRM_MODE_CONTENT_PROTECTION_DESIRED) { 2489 mutex_lock(&hdcp->mutex); 2490 /* Avoid enabling hdcp, if it already ENABLED */ 2491 desired_and_not_enabled = 2492 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; 2493 mutex_unlock(&hdcp->mutex); 2494 /* 2495 * If HDCP already ENABLED and CP property is DESIRED, schedule 2496 * prop_work to update correct CP property to user space. 
2497 */ 2498 if (!desired_and_not_enabled && !content_protection_type_changed) { 2499 drm_connector_get(&connector->base); 2500 queue_work(i915->unordered_wq, &hdcp->prop_work); 2501 } 2502 } 2503 2504 if (desired_and_not_enabled || content_protection_type_changed) 2505 intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2506 } 2507 2508 void intel_hdcp_component_fini(struct drm_i915_private *i915) 2509 { 2510 mutex_lock(&i915->display.hdcp.hdcp_mutex); 2511 if (!i915->display.hdcp.comp_added) { 2512 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2513 return; 2514 } 2515 2516 i915->display.hdcp.comp_added = false; 2517 mutex_unlock(&i915->display.hdcp.hdcp_mutex); 2518 2519 if (intel_hdcp_gsc_cs_required(i915)) 2520 intel_hdcp_gsc_fini(i915); 2521 else 2522 component_del(i915->drm.dev, &i915_hdcp_ops); 2523 } 2524 2525 void intel_hdcp_cleanup(struct intel_connector *connector) 2526 { 2527 struct intel_hdcp *hdcp = &connector->hdcp; 2528 2529 if (!hdcp->shim) 2530 return; 2531 2532 /* 2533 * If the connector is registered, it's possible userspace could kick 2534 * off another HDCP enable, which would re-spawn the workers. 2535 */ 2536 drm_WARN_ON(connector->base.dev, 2537 connector->base.registration_state == DRM_CONNECTOR_REGISTERED); 2538 2539 /* 2540 * Now that the connector is not registered, check_work won't be run, 2541 * but cancel any outstanding instances of it 2542 */ 2543 cancel_delayed_work_sync(&hdcp->check_work); 2544 2545 /* 2546 * We don't cancel prop_work in the same way as check_work since it 2547 * requires connection_mutex which could be held while calling this 2548 * function. Instead, we rely on the connector references grabbed before 2549 * scheduling prop_work to ensure the connector is alive when prop_work 2550 * is run. So if we're in the destroy path (which is where this 2551 * function should be called), we're "guaranteed" that prop_work is not 2552 * active (tl;dr This Should Never Happen). 
2553 */ 2554 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work)); 2555 2556 mutex_lock(&hdcp->mutex); 2557 hdcp->shim = NULL; 2558 mutex_unlock(&hdcp->mutex); 2559 } 2560 2561 void intel_hdcp_atomic_check(struct drm_connector *connector, 2562 struct drm_connector_state *old_state, 2563 struct drm_connector_state *new_state) 2564 { 2565 u64 old_cp = old_state->content_protection; 2566 u64 new_cp = new_state->content_protection; 2567 struct drm_crtc_state *crtc_state; 2568 2569 if (!new_state->crtc) { 2570 /* 2571 * If the connector is being disabled with CP enabled, mark it 2572 * desired so it's re-enabled when the connector is brought back 2573 */ 2574 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 2575 new_state->content_protection = 2576 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2577 return; 2578 } 2579 2580 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 2581 new_state->crtc); 2582 /* 2583 * Fix the HDCP uapi content protection state in case of modeset. 2584 * FIXME: As per HDCP content protection property uapi doc, an uevent() 2585 * need to be sent if there is transition from ENABLED->DESIRED. 2586 */ 2587 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2588 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && 2589 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2590 new_state->content_protection = 2591 DRM_MODE_CONTENT_PROTECTION_DESIRED; 2592 2593 /* 2594 * Nothing to do if the state didn't change, or HDCP was activated since 2595 * the last commit. And also no change in hdcp content type. 
2596 */ 2597 if (old_cp == new_cp || 2598 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 2599 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 2600 if (old_state->hdcp_content_type == 2601 new_state->hdcp_content_type) 2602 return; 2603 } 2604 2605 crtc_state->mode_changed = true; 2606 } 2607 2608 /* Handles the CP_IRQ raised from the DP HDCP sink */ 2609 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 2610 { 2611 struct intel_hdcp *hdcp = &connector->hdcp; 2612 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2613 2614 if (!hdcp->shim) 2615 return; 2616 2617 atomic_inc(&connector->hdcp.cp_irq_count); 2618 wake_up_all(&connector->hdcp.cp_irq_queue); 2619 2620 queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); 2621 } 2622