/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 */

#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>

#include <drm/drm_hdcp.h>
#include <drm/i915_component.h>

#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"

#define KEY_LOAD_TRIES	5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
#define HDCP2_LC_RETRY_CNT			3

/* A valid KSV contains exactly 20 set bits (and therefore 20 clear bits). */
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
	int i, ones = 0;
	/* KSV has 20 1's and 20 0's */
	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
		ones += hweight8(ksv[i]);
	if (ones != 20)
		return false;

	return true;
}

/*
 * Read the sink's Bksv via the shim and validate it, retrying once on an
 * invalid value. Returns 0 on success, -ENODEV if no valid Bksv was read,
 * or the shim's error code.
 */
static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
			       const struct intel_hdcp_shim *shim, u8 *bksv)
{
	int ret, i, tries = 2;

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(intel_dig_port, bksv);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv))
			break;
	}
	if (i == tries) {
		DRM_DEBUG_KMS("Bksv is invalid\n");
		return -ENODEV;
	}

	return 0;
}

/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	bool capable = false;
	u8 bksv[5];

	if (!shim)
		return capable;

	if (shim->hdcp_capable) {
		shim->hdcp_capable(intel_dig_port, &capable);
	} else {
		/* No dedicated query op: a readable, valid Bksv implies 1.4 */
		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
			capable = true;
	}

	return capable;
}

/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool capable = false;

	/* I915 support for HDCP2.2 */
	if (!hdcp->hdcp2_supported)
		return false;

	/* MEI interface is solid */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return false;
	}
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	/* Sink's capability for HDCP2.2 */
	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);

	return capable;
}

/* True iff HDCP1.4 encryption is currently enabled on this port. */
static inline bool intel_hdcp_in_use(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum port port = connector->encoder->port;
	u32 reg;

	reg = I915_READ(PORT_HDCP_STATUS(port));
	return reg & HDCP_STATUS_ENC;
}

/* True iff HDCP2.2 link encryption is currently enabled on this port. */
static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum port port = connector->encoder->port;
	u32 reg;

	reg = I915_READ(HDCP2_STATUS_DDI(port));
	return reg & LINK_ENCRYPTION_STATUS;
}

/*
 * Wait (up to the spec-mandated 5s) for the repeater's KSV FIFO to become
 * ready. Returns 0 when ready, a shim error, or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}

/* Can the HDCP key be loaded right now? Requires power well #1 to be up. */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->desc->id == id) {
			enabled = power_well->desc->ops->is_enabled(dev_priv,
								    power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}

/* Clear the loaded HDCP keys and acknowledge all key-status bits. */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}

/*
 * Load the HDCP1.4 keys into HW (platform-specific trigger) and push the
 * Aksv to the PCH for use in authentication. No-op if already loaded.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
				  ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}

/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	I915_WRITE(HDCP_SHA_TEXT, sha_text);
	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 * Per-DDI repeater-present + SHA1-M0 select bits for HDCP_REP_CTL.
 * Returns -EINVAL (as a u32) for an unknown port.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;
	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		break;
	}
	DRM_ERROR("Unknown port %d\n", port);
	return -EINVAL;
}

/*
 * Feed the downstream KSV list, BINFO/BSTATUS and M0 through the HW SHA-1
 * engine and compare the result against the repeater's V'. Returns 0 on a
 * match, -ENXIO on mismatch, or a wait/write error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct drm_i915_private *dev_priv;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
			      sha_leftovers);
		return -EINVAL;
	}

	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}
in bits. That is: 468 * - 5 bytes per device 469 * - 10 bytes for BINFO/BSTATUS(2), M0(8) 470 */ 471 sha_text = (num_downstream * 5 + 10) * 8; 472 ret = intel_write_sha_text(dev_priv, sha_text); 473 if (ret < 0) 474 return ret; 475 476 /* Tell the HW we're done with the hash and wait for it to ACK */ 477 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); 478 if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL, 479 HDCP_SHA1_COMPLETE, 480 HDCP_SHA1_COMPLETE, 1)) { 481 DRM_ERROR("Timed out waiting for SHA1 complete\n"); 482 return -ETIMEDOUT; 483 } 484 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 485 DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n"); 486 return -ENXIO; 487 } 488 489 return 0; 490 } 491 492 /* Implements Part 2 of the HDCP authorization procedure */ 493 static 494 int intel_hdcp_auth_downstream(struct intel_connector *connector) 495 { 496 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 497 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 498 struct drm_device *dev = connector->base.dev; 499 u8 bstatus[2], num_downstream, *ksv_fifo; 500 int ret, i, tries = 3; 501 502 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); 503 if (ret) { 504 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret); 505 return ret; 506 } 507 508 ret = shim->read_bstatus(intel_dig_port, bstatus); 509 if (ret) 510 return ret; 511 512 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 513 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 514 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n"); 515 return -EPERM; 516 } 517 518 /* 519 * When repeater reports 0 device count, HDCP1.4 spec allows disabling 520 * the HDCP encryption. That implies that repeater can't have its own 521 * display. As there is no consumption of encrypted content in the 522 * repeater with 0 downstream devices, we are failing the 523 * authentication. 
524 */ 525 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); 526 if (num_downstream == 0) 527 return -EINVAL; 528 529 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); 530 if (!ksv_fifo) 531 return -ENOMEM; 532 533 ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); 534 if (ret) 535 goto err; 536 537 if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) { 538 DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n"); 539 return -EPERM; 540 } 541 542 /* 543 * When V prime mismatches, DP Spec mandates re-read of 544 * V prime atleast twice. 545 */ 546 for (i = 0; i < tries; i++) { 547 ret = intel_hdcp_validate_v_prime(intel_dig_port, shim, 548 ksv_fifo, num_downstream, 549 bstatus); 550 if (!ret) 551 break; 552 } 553 554 if (i == tries) { 555 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret); 556 goto err; 557 } 558 559 DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n", 560 num_downstream); 561 ret = 0; 562 err: 563 kfree(ksv_fifo); 564 return ret; 565 } 566 567 /* Implements Part 1 of the HDCP authorization procedure */ 568 static int intel_hdcp_auth(struct intel_connector *connector) 569 { 570 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 571 struct intel_hdcp *hdcp = &connector->hdcp; 572 struct drm_device *dev = connector->base.dev; 573 const struct intel_hdcp_shim *shim = hdcp->shim; 574 struct drm_i915_private *dev_priv; 575 enum port port; 576 unsigned long r0_prime_gen_start; 577 int ret, i, tries = 2; 578 union { 579 u32 reg[2]; 580 u8 shim[DRM_HDCP_AN_LEN]; 581 } an; 582 union { 583 u32 reg[2]; 584 u8 shim[DRM_HDCP_KSV_LEN]; 585 } bksv; 586 union { 587 u32 reg; 588 u8 shim[DRM_HDCP_RI_LEN]; 589 } ri; 590 bool repeater_present, hdcp_capable; 591 592 dev_priv = intel_dig_port->base.base.dev->dev_private; 593 594 port = intel_dig_port->base.port; 595 596 /* 597 * Detects whether the display is HDCP capable. 
	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' generation deadline is measured from the Aksv write */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
		DRM_ERROR("BKSV is revoked\n");
		return -EPERM;
	}

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
			      I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}

/* Tear down HDCP1.4 encryption and signalling on this connector's port. */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
		      connector->base.name, connector->base.base.id);

	hdcp->hdcp_encrypted = false;
	I915_WRITE(PORT_HDCP_CONF(port), 0);
	if (intel_wait_for_register(&dev_priv->uncore,
				    PORT_HDCP_STATUS(port), ~0, 0,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	DRM_DEBUG_KMS("HDCP is disabled\n");
	return 0;
}

/* Load the HDCP keys and run authentication, with retries on failure. */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	int i, ret, tries = 3;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
		      connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		DRM_ERROR("HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}

static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}

/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	mutex_lock(&hdcp->mutex);

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (WARN_ON(!intel_hdcp_in_use(connector))) {
		DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
			  connector->base.name, connector->base.base.id,
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	if (hdcp->shim->check_link(intel_dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
		      connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}

/* Worker that pushes hdcp->value out to the content-protection property. */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_device *dev = connector->base.dev;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}

bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
	/* PORT E doesn't have HDCP, and PORT F is disabled */
	return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}

/*
 * The hdcp2_* helpers below all follow the same shape: take the component
 * mutex, bail out with -EINVAL if the MEI component is not bound, invoke
 * the corresponding MEI op, log on failure and return its result.
 */
static int
hdcp2_prepare_ake_init(struct intel_connector *connector,
		       struct hdcp2_ake_init *ake_data)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
	if (ret)
		DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
							 rx_cert, paired,
							 ek_pub_km, msg_sz);
	if (ret < 0)
		DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_verify_hprime(struct intel_connector *connector,
			       struct hdcp2_ake_send_hprime *rx_hprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
	if (ret < 0)
		DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_store_pairing_info(struct intel_connector *connector,
			 struct hdcp2_ake_send_pairing_info *pairing_info)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
	if (ret < 0)
		DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_prepare_lc_init(struct intel_connector *connector,
		      struct hdcp2_lc_init *lc_init)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
	if (ret < 0)
		DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_lprime(struct intel_connector *connector,
		    struct hdcp2_lc_send_lprime *rx_lprime)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
	if (ret < 0)
		DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_prepare_skey(struct intel_connector *connector,
			      struct hdcp2_ske_send_eks *ske_data)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
	if (ret < 0)
		DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
				      struct hdcp2_rep_send_receiverid_list
								*rep_topology,
				      struct hdcp2_rep_send_ack *rep_send_ack)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
							 rep_topology,
							 rep_send_ack);
	if (ret < 0)
		DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int
hdcp2_verify_mprime(struct intel_connector *connector,
		    struct hdcp2_rep_stream_ready *stream_ready)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
	if (ret < 0)
		DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_authenticate_port(struct intel_connector *connector)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
	if (ret < 0)
		DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_close_mei_session(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->close_hdcp_session(comp->mei_dev,
					    &connector->hdcp.port_data);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}

static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}

/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
		return -EINVAL;

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
					1)) {
		DRM_ERROR("Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
1221 */ 1222 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, 1223 &hdcp->is_paired, 1224 &msgs.no_stored_km, &size); 1225 if (ret < 0) 1226 return ret; 1227 1228 ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size); 1229 if (ret < 0) 1230 return ret; 1231 1232 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME, 1233 &msgs.send_hprime, sizeof(msgs.send_hprime)); 1234 if (ret < 0) 1235 return ret; 1236 1237 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); 1238 if (ret < 0) 1239 return ret; 1240 1241 if (!hdcp->is_paired) { 1242 /* Pairing is required */ 1243 ret = shim->read_2_2_msg(intel_dig_port, 1244 HDCP_2_2_AKE_SEND_PAIRING_INFO, 1245 &msgs.pairing_info, 1246 sizeof(msgs.pairing_info)); 1247 if (ret < 0) 1248 return ret; 1249 1250 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); 1251 if (ret < 0) 1252 return ret; 1253 hdcp->is_paired = true; 1254 } 1255 1256 return 0; 1257 } 1258 1259 static int hdcp2_locality_check(struct intel_connector *connector) 1260 { 1261 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1262 struct intel_hdcp *hdcp = &connector->hdcp; 1263 union { 1264 struct hdcp2_lc_init lc_init; 1265 struct hdcp2_lc_send_lprime send_lprime; 1266 } msgs; 1267 const struct intel_hdcp_shim *shim = hdcp->shim; 1268 int tries = HDCP2_LC_RETRY_CNT, ret, i; 1269 1270 for (i = 0; i < tries; i++) { 1271 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); 1272 if (ret < 0) 1273 continue; 1274 1275 ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init, 1276 sizeof(msgs.lc_init)); 1277 if (ret < 0) 1278 continue; 1279 1280 ret = shim->read_2_2_msg(intel_dig_port, 1281 HDCP_2_2_LC_SEND_LPRIME, 1282 &msgs.send_lprime, 1283 sizeof(msgs.send_lprime)); 1284 if (ret < 0) 1285 continue; 1286 1287 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); 1288 if (!ret) 1289 break; 1290 } 1291 1292 return ret; 1293 } 1294 1295 static int hdcp2_session_key_exchange(struct 
intel_connector *connector) 1296 { 1297 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1298 struct intel_hdcp *hdcp = &connector->hdcp; 1299 struct hdcp2_ske_send_eks send_eks; 1300 int ret; 1301 1302 ret = hdcp2_prepare_skey(connector, &send_eks); 1303 if (ret < 0) 1304 return ret; 1305 1306 ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks, 1307 sizeof(send_eks)); 1308 if (ret < 0) 1309 return ret; 1310 1311 return 0; 1312 } 1313 1314 static 1315 int hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1316 { 1317 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1318 struct intel_hdcp *hdcp = &connector->hdcp; 1319 union { 1320 struct hdcp2_rep_stream_manage stream_manage; 1321 struct hdcp2_rep_stream_ready stream_ready; 1322 } msgs; 1323 const struct intel_hdcp_shim *shim = hdcp->shim; 1324 int ret; 1325 1326 /* Prepare RepeaterAuth_Stream_Manage msg */ 1327 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; 1328 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); 1329 1330 /* K no of streams is fixed as 1. Stored as big-endian. */ 1331 msgs.stream_manage.k = cpu_to_be16(1); 1332 1333 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. 
*/ 1334 msgs.stream_manage.streams[0].stream_id = 0; 1335 msgs.stream_manage.streams[0].stream_type = hdcp->content_type; 1336 1337 /* Send it to Repeater */ 1338 ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage, 1339 sizeof(msgs.stream_manage)); 1340 if (ret < 0) 1341 return ret; 1342 1343 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY, 1344 &msgs.stream_ready, sizeof(msgs.stream_ready)); 1345 if (ret < 0) 1346 return ret; 1347 1348 hdcp->port_data.seq_num_m = hdcp->seq_num_m; 1349 hdcp->port_data.streams[0].stream_type = hdcp->content_type; 1350 1351 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); 1352 if (ret < 0) 1353 return ret; 1354 1355 hdcp->seq_num_m++; 1356 1357 if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { 1358 DRM_DEBUG_KMS("seq_num_m roll over.\n"); 1359 return -1; 1360 } 1361 1362 return 0; 1363 } 1364 1365 static 1366 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) 1367 { 1368 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1369 struct intel_hdcp *hdcp = &connector->hdcp; 1370 struct drm_device *dev = connector->base.dev; 1371 union { 1372 struct hdcp2_rep_send_receiverid_list recvid_list; 1373 struct hdcp2_rep_send_ack rep_ack; 1374 } msgs; 1375 const struct intel_hdcp_shim *shim = hdcp->shim; 1376 u32 seq_num_v, device_cnt; 1377 u8 *rx_info; 1378 int ret; 1379 1380 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, 1381 &msgs.recvid_list, sizeof(msgs.recvid_list)); 1382 if (ret < 0) 1383 return ret; 1384 1385 rx_info = msgs.recvid_list.rx_info; 1386 1387 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || 1388 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { 1389 DRM_DEBUG_KMS("Topology Max Size Exceeded\n"); 1390 return -EINVAL; 1391 } 1392 1393 /* Converting and Storing the seq_num_v to local variable as DWORD */ 1394 seq_num_v = 1395 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); 1396 1397 if (seq_num_v < hdcp->seq_num_v) { 
1398 /* Roll over of the seq_num_v from repeater. Reauthenticate. */ 1399 DRM_DEBUG_KMS("Seq_num_v roll over.\n"); 1400 return -EINVAL; 1401 } 1402 1403 device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 1404 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 1405 if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids, 1406 device_cnt)) { 1407 DRM_ERROR("Revoked receiver ID(s) is in list\n"); 1408 return -EPERM; 1409 } 1410 1411 ret = hdcp2_verify_rep_topology_prepare_ack(connector, 1412 &msgs.recvid_list, 1413 &msgs.rep_ack); 1414 if (ret < 0) 1415 return ret; 1416 1417 hdcp->seq_num_v = seq_num_v; 1418 ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack, 1419 sizeof(msgs.rep_ack)); 1420 if (ret < 0) 1421 return ret; 1422 1423 return 0; 1424 } 1425 1426 static int hdcp2_authenticate_repeater(struct intel_connector *connector) 1427 { 1428 int ret; 1429 1430 ret = hdcp2_authenticate_repeater_topology(connector); 1431 if (ret < 0) 1432 return ret; 1433 1434 return hdcp2_propagate_stream_management_info(connector); 1435 } 1436 1437 static int hdcp2_authenticate_sink(struct intel_connector *connector) 1438 { 1439 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1440 struct intel_hdcp *hdcp = &connector->hdcp; 1441 const struct intel_hdcp_shim *shim = hdcp->shim; 1442 int ret; 1443 1444 ret = hdcp2_authentication_key_exchange(connector); 1445 if (ret < 0) { 1446 DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret); 1447 return ret; 1448 } 1449 1450 ret = hdcp2_locality_check(connector); 1451 if (ret < 0) { 1452 DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret); 1453 return ret; 1454 } 1455 1456 ret = hdcp2_session_key_exchange(connector); 1457 if (ret < 0) { 1458 DRM_DEBUG_KMS("SKE Failed. 
Err : %d\n", ret); 1459 return ret; 1460 } 1461 1462 if (shim->config_stream_type) { 1463 ret = shim->config_stream_type(intel_dig_port, 1464 hdcp->is_repeater, 1465 hdcp->content_type); 1466 if (ret < 0) 1467 return ret; 1468 } 1469 1470 if (hdcp->is_repeater) { 1471 ret = hdcp2_authenticate_repeater(connector); 1472 if (ret < 0) { 1473 DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret); 1474 return ret; 1475 } 1476 } 1477 1478 hdcp->port_data.streams[0].stream_type = hdcp->content_type; 1479 ret = hdcp2_authenticate_port(connector); 1480 if (ret < 0) 1481 return ret; 1482 1483 return ret; 1484 } 1485 1486 static int hdcp2_enable_encryption(struct intel_connector *connector) 1487 { 1488 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1489 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1490 struct intel_hdcp *hdcp = &connector->hdcp; 1491 enum port port = connector->encoder->port; 1492 int ret; 1493 1494 WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS); 1495 1496 if (hdcp->shim->toggle_signalling) { 1497 ret = hdcp->shim->toggle_signalling(intel_dig_port, true); 1498 if (ret) { 1499 DRM_ERROR("Failed to enable HDCP signalling. %d\n", 1500 ret); 1501 return ret; 1502 } 1503 } 1504 1505 if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) { 1506 /* Link is Authenticated. 
Now set for Encryption */ 1507 I915_WRITE(HDCP2_CTL_DDI(port), 1508 I915_READ(HDCP2_CTL_DDI(port)) | 1509 CTL_LINK_ENCRYPTION_REQ); 1510 } 1511 1512 ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port), 1513 LINK_ENCRYPTION_STATUS, 1514 LINK_ENCRYPTION_STATUS, 1515 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1516 1517 return ret; 1518 } 1519 1520 static int hdcp2_disable_encryption(struct intel_connector *connector) 1521 { 1522 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1523 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1524 struct intel_hdcp *hdcp = &connector->hdcp; 1525 enum port port = connector->encoder->port; 1526 int ret; 1527 1528 WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS)); 1529 1530 I915_WRITE(HDCP2_CTL_DDI(port), 1531 I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ); 1532 1533 ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port), 1534 LINK_ENCRYPTION_STATUS, 0x0, 1535 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1536 if (ret == -ETIMEDOUT) 1537 DRM_DEBUG_KMS("Disable Encryption Timedout"); 1538 1539 if (hdcp->shim->toggle_signalling) { 1540 ret = hdcp->shim->toggle_signalling(intel_dig_port, false); 1541 if (ret) { 1542 DRM_ERROR("Failed to disable HDCP signalling. %d\n", 1543 ret); 1544 return ret; 1545 } 1546 } 1547 1548 return ret; 1549 } 1550 1551 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) 1552 { 1553 int ret, i, tries = 3; 1554 1555 for (i = 0; i < tries; i++) { 1556 ret = hdcp2_authenticate_sink(connector); 1557 if (!ret) 1558 break; 1559 1560 /* Clearing the mei hdcp session */ 1561 DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n", 1562 i + 1, tries, ret); 1563 if (hdcp2_deauthenticate_port(connector) < 0) 1564 DRM_DEBUG_KMS("Port deauth failed.\n"); 1565 } 1566 1567 if (i != tries) { 1568 /* 1569 * Ensuring the required 200mSec min time interval between 1570 * Session Key Exchange and encryption. 
1571 */ 1572 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); 1573 ret = hdcp2_enable_encryption(connector); 1574 if (ret < 0) { 1575 DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret); 1576 if (hdcp2_deauthenticate_port(connector) < 0) 1577 DRM_DEBUG_KMS("Port deauth failed.\n"); 1578 } 1579 } 1580 1581 return ret; 1582 } 1583 1584 static int _intel_hdcp2_enable(struct intel_connector *connector) 1585 { 1586 struct intel_hdcp *hdcp = &connector->hdcp; 1587 int ret; 1588 1589 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n", 1590 connector->base.name, connector->base.base.id, 1591 hdcp->content_type); 1592 1593 ret = hdcp2_authenticate_and_encrypt(connector); 1594 if (ret) { 1595 DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n", 1596 hdcp->content_type, ret); 1597 return ret; 1598 } 1599 1600 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n", 1601 connector->base.name, connector->base.base.id, 1602 hdcp->content_type); 1603 1604 hdcp->hdcp2_encrypted = true; 1605 return 0; 1606 } 1607 1608 static int _intel_hdcp2_disable(struct intel_connector *connector) 1609 { 1610 int ret; 1611 1612 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n", 1613 connector->base.name, connector->base.base.id); 1614 1615 ret = hdcp2_disable_encryption(connector); 1616 1617 if (hdcp2_deauthenticate_port(connector) < 0) 1618 DRM_DEBUG_KMS("Port deauth failed.\n"); 1619 1620 connector->hdcp.hdcp2_encrypted = false; 1621 1622 return ret; 1623 } 1624 1625 /* Implements the Link Integrity Check for HDCP2.2 */ 1626 static int intel_hdcp2_check_link(struct intel_connector *connector) 1627 { 1628 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); 1629 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1630 struct intel_hdcp *hdcp = &connector->hdcp; 1631 enum port port = connector->encoder->port; 1632 int ret = 0; 1633 1634 mutex_lock(&hdcp->mutex); 1635 1636 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ 1637 if 
(hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 1638 !hdcp->hdcp2_encrypted) { 1639 ret = -EINVAL; 1640 goto out; 1641 } 1642 1643 if (WARN_ON(!intel_hdcp2_in_use(connector))) { 1644 DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n", 1645 I915_READ(HDCP2_STATUS_DDI(port))); 1646 ret = -ENXIO; 1647 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 1648 schedule_work(&hdcp->prop_work); 1649 goto out; 1650 } 1651 1652 ret = hdcp->shim->check_2_2_link(intel_dig_port); 1653 if (ret == HDCP_LINK_PROTECTED) { 1654 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 1655 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 1656 schedule_work(&hdcp->prop_work); 1657 } 1658 goto out; 1659 } 1660 1661 if (ret == HDCP_TOPOLOGY_CHANGE) { 1662 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 1663 goto out; 1664 1665 DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n"); 1666 ret = hdcp2_authenticate_repeater_topology(connector); 1667 if (!ret) { 1668 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 1669 schedule_work(&hdcp->prop_work); 1670 goto out; 1671 } 1672 DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n", 1673 connector->base.name, connector->base.base.id, 1674 ret); 1675 } else { 1676 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n", 1677 connector->base.name, connector->base.base.id); 1678 } 1679 1680 ret = _intel_hdcp2_disable(connector); 1681 if (ret) { 1682 DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n", 1683 connector->base.name, connector->base.base.id, ret); 1684 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 1685 schedule_work(&hdcp->prop_work); 1686 goto out; 1687 } 1688 1689 ret = _intel_hdcp2_enable(connector); 1690 if (ret) { 1691 DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n", 1692 connector->base.name, connector->base.base.id, 1693 ret); 1694 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 1695 schedule_work(&hdcp->prop_work); 1696 goto out; 1697 } 1698 1699 out: 1700 
mutex_unlock(&hdcp->mutex); 1701 return ret; 1702 } 1703 1704 static void intel_hdcp_check_work(struct work_struct *work) 1705 { 1706 struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 1707 struct intel_hdcp, 1708 check_work); 1709 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 1710 1711 if (!intel_hdcp2_check_link(connector)) 1712 schedule_delayed_work(&hdcp->check_work, 1713 DRM_HDCP2_CHECK_PERIOD_MS); 1714 else if (!intel_hdcp_check_link(connector)) 1715 schedule_delayed_work(&hdcp->check_work, 1716 DRM_HDCP_CHECK_PERIOD_MS); 1717 } 1718 1719 static int i915_hdcp_component_bind(struct device *i915_kdev, 1720 struct device *mei_kdev, void *data) 1721 { 1722 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); 1723 1724 DRM_DEBUG("I915 HDCP comp bind\n"); 1725 mutex_lock(&dev_priv->hdcp_comp_mutex); 1726 dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; 1727 dev_priv->hdcp_master->mei_dev = mei_kdev; 1728 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1729 1730 return 0; 1731 } 1732 1733 static void i915_hdcp_component_unbind(struct device *i915_kdev, 1734 struct device *mei_kdev, void *data) 1735 { 1736 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); 1737 1738 DRM_DEBUG("I915 HDCP comp unbind\n"); 1739 mutex_lock(&dev_priv->hdcp_comp_mutex); 1740 dev_priv->hdcp_master = NULL; 1741 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1742 } 1743 1744 static const struct component_ops i915_hdcp_component_ops = { 1745 .bind = i915_hdcp_component_bind, 1746 .unbind = i915_hdcp_component_unbind, 1747 }; 1748 1749 static inline int initialize_hdcp_port_data(struct intel_connector *connector, 1750 const struct intel_hdcp_shim *shim) 1751 { 1752 struct intel_hdcp *hdcp = &connector->hdcp; 1753 struct hdcp_port_data *data = &hdcp->port_data; 1754 1755 data->port = connector->encoder->port; 1756 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 1757 data->protocol = (u8)shim->protocol; 1758 1759 data->k = 1; 1760 if 
(!data->streams) 1761 data->streams = kcalloc(data->k, 1762 sizeof(struct hdcp2_streamid_type), 1763 GFP_KERNEL); 1764 if (!data->streams) { 1765 DRM_ERROR("Out of Memory\n"); 1766 return -ENOMEM; 1767 } 1768 1769 data->streams[0].stream_id = 0; 1770 data->streams[0].stream_type = hdcp->content_type; 1771 1772 return 0; 1773 } 1774 1775 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) 1776 { 1777 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 1778 return false; 1779 1780 return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || 1781 IS_KABYLAKE(dev_priv)); 1782 } 1783 1784 void intel_hdcp_component_init(struct drm_i915_private *dev_priv) 1785 { 1786 int ret; 1787 1788 if (!is_hdcp2_supported(dev_priv)) 1789 return; 1790 1791 mutex_lock(&dev_priv->hdcp_comp_mutex); 1792 WARN_ON(dev_priv->hdcp_comp_added); 1793 1794 dev_priv->hdcp_comp_added = true; 1795 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1796 ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, 1797 I915_COMPONENT_HDCP); 1798 if (ret < 0) { 1799 DRM_DEBUG_KMS("Failed at component add(%d)\n", ret); 1800 mutex_lock(&dev_priv->hdcp_comp_mutex); 1801 dev_priv->hdcp_comp_added = false; 1802 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1803 return; 1804 } 1805 } 1806 1807 static void intel_hdcp2_init(struct intel_connector *connector, 1808 const struct intel_hdcp_shim *shim) 1809 { 1810 struct intel_hdcp *hdcp = &connector->hdcp; 1811 int ret; 1812 1813 ret = initialize_hdcp_port_data(connector, shim); 1814 if (ret) { 1815 DRM_DEBUG_KMS("Mei hdcp data init failed\n"); 1816 return; 1817 } 1818 1819 hdcp->hdcp2_supported = true; 1820 } 1821 1822 int intel_hdcp_init(struct intel_connector *connector, 1823 const struct intel_hdcp_shim *shim) 1824 { 1825 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1826 struct intel_hdcp *hdcp = &connector->hdcp; 1827 int ret; 1828 1829 if (!shim) 1830 return -EINVAL; 1831 1832 if (is_hdcp2_supported(dev_priv)) 1833 
intel_hdcp2_init(connector, shim); 1834 1835 ret = 1836 drm_connector_attach_content_protection_property(&connector->base, 1837 hdcp->hdcp2_supported); 1838 if (ret) { 1839 hdcp->hdcp2_supported = false; 1840 kfree(hdcp->port_data.streams); 1841 return ret; 1842 } 1843 1844 hdcp->shim = shim; 1845 mutex_init(&hdcp->mutex); 1846 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); 1847 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); 1848 init_waitqueue_head(&hdcp->cp_irq_queue); 1849 1850 return 0; 1851 } 1852 1853 int intel_hdcp_enable(struct intel_connector *connector, u8 content_type) 1854 { 1855 struct intel_hdcp *hdcp = &connector->hdcp; 1856 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; 1857 int ret = -EINVAL; 1858 1859 if (!hdcp->shim) 1860 return -ENOENT; 1861 1862 mutex_lock(&hdcp->mutex); 1863 WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); 1864 hdcp->content_type = content_type; 1865 1866 /* 1867 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup 1868 * is capable of HDCP2.2, it is preferred to use HDCP2.2. 1869 */ 1870 if (intel_hdcp2_capable(connector)) { 1871 ret = _intel_hdcp2_enable(connector); 1872 if (!ret) 1873 check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS; 1874 } 1875 1876 /* 1877 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will 1878 * be attempted. 
1879 */ 1880 if (ret && intel_hdcp_capable(connector) && 1881 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 1882 ret = _intel_hdcp_enable(connector); 1883 } 1884 1885 if (!ret) { 1886 schedule_delayed_work(&hdcp->check_work, check_link_interval); 1887 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 1888 schedule_work(&hdcp->prop_work); 1889 } 1890 1891 mutex_unlock(&hdcp->mutex); 1892 return ret; 1893 } 1894 1895 int intel_hdcp_disable(struct intel_connector *connector) 1896 { 1897 struct intel_hdcp *hdcp = &connector->hdcp; 1898 int ret = 0; 1899 1900 if (!hdcp->shim) 1901 return -ENOENT; 1902 1903 mutex_lock(&hdcp->mutex); 1904 1905 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 1906 hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; 1907 if (hdcp->hdcp2_encrypted) 1908 ret = _intel_hdcp2_disable(connector); 1909 else if (hdcp->hdcp_encrypted) 1910 ret = _intel_hdcp_disable(connector); 1911 } 1912 1913 mutex_unlock(&hdcp->mutex); 1914 cancel_delayed_work_sync(&hdcp->check_work); 1915 return ret; 1916 } 1917 1918 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) 1919 { 1920 mutex_lock(&dev_priv->hdcp_comp_mutex); 1921 if (!dev_priv->hdcp_comp_added) { 1922 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1923 return; 1924 } 1925 1926 dev_priv->hdcp_comp_added = false; 1927 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1928 1929 component_del(dev_priv->drm.dev, &i915_hdcp_component_ops); 1930 } 1931 1932 void intel_hdcp_cleanup(struct intel_connector *connector) 1933 { 1934 if (!connector->hdcp.shim) 1935 return; 1936 1937 mutex_lock(&connector->hdcp.mutex); 1938 kfree(connector->hdcp.port_data.streams); 1939 mutex_unlock(&connector->hdcp.mutex); 1940 } 1941 1942 void intel_hdcp_atomic_check(struct drm_connector *connector, 1943 struct drm_connector_state *old_state, 1944 struct drm_connector_state *new_state) 1945 { 1946 u64 old_cp = old_state->content_protection; 1947 u64 new_cp = new_state->content_protection; 1948 struct 
drm_crtc_state *crtc_state; 1949 1950 if (!new_state->crtc) { 1951 /* 1952 * If the connector is being disabled with CP enabled, mark it 1953 * desired so it's re-enabled when the connector is brought back 1954 */ 1955 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED) 1956 new_state->content_protection = 1957 DRM_MODE_CONTENT_PROTECTION_DESIRED; 1958 return; 1959 } 1960 1961 /* 1962 * Nothing to do if the state didn't change, or HDCP was activated since 1963 * the last commit. And also no change in hdcp content type. 1964 */ 1965 if (old_cp == new_cp || 1966 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && 1967 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) { 1968 if (old_state->hdcp_content_type == 1969 new_state->hdcp_content_type) 1970 return; 1971 } 1972 1973 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 1974 new_state->crtc); 1975 crtc_state->mode_changed = true; 1976 } 1977 1978 /* Handles the CP_IRQ raised from the DP HDCP sink */ 1979 void intel_hdcp_handle_cp_irq(struct intel_connector *connector) 1980 { 1981 struct intel_hdcp *hdcp = &connector->hdcp; 1982 1983 if (!hdcp->shim) 1984 return; 1985 1986 atomic_inc(&connector->hdcp.cp_irq_count); 1987 wake_up_all(&connector->hdcp.cp_irq_queue); 1988 1989 schedule_delayed_work(&hdcp->check_work, 0); 1990 } 1991