1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright (C) 2017 Google, Inc. 4 * Copyright _ 2017-2019, Intel Corporation. 5 * 6 * Authors: 7 * Sean Paul <seanpaul@chromium.org> 8 * Ramalingam C <ramalingam.c@intel.com> 9 */ 10 11 #include <linux/component.h> 12 #include <linux/i2c.h> 13 #include <linux/random.h> 14 15 #include <drm/drm_hdcp.h> 16 #include <drm/i915_component.h> 17 18 #include "i915_reg.h" 19 #include "intel_display_power.h" 20 #include "intel_display_types.h" 21 #include "intel_hdcp.h" 22 #include "intel_sideband.h" 23 #include "intel_connector.h" 24 25 #define KEY_LOAD_TRIES 5 26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50 27 #define HDCP2_LC_RETRY_CNT 3 28 29 static 30 bool intel_hdcp_is_ksv_valid(u8 *ksv) 31 { 32 int i, ones = 0; 33 /* KSV has 20 1's and 20 0's */ 34 for (i = 0; i < DRM_HDCP_KSV_LEN; i++) 35 ones += hweight8(ksv[i]); 36 if (ones != 20) 37 return false; 38 39 return true; 40 } 41 42 static 43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, 44 const struct intel_hdcp_shim *shim, u8 *bksv) 45 { 46 struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); 47 int ret, i, tries = 2; 48 49 /* HDCP spec states that we must retry the bksv if it is invalid */ 50 for (i = 0; i < tries; i++) { 51 ret = shim->read_bksv(intel_dig_port, bksv); 52 if (ret) 53 return ret; 54 if (intel_hdcp_is_ksv_valid(bksv)) 55 break; 56 } 57 if (i == tries) { 58 drm_dbg_kms(&i915->drm, "Bksv is invalid\n"); 59 return -ENODEV; 60 } 61 62 return 0; 63 } 64 65 /* Is HDCP1.4 capable on Platform and Sink */ 66 bool intel_hdcp_capable(struct intel_connector *connector) 67 { 68 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 69 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 70 bool capable = false; 71 u8 bksv[5]; 72 73 if (!shim) 74 return capable; 75 76 if (shim->hdcp_capable) { 77 shim->hdcp_capable(intel_dig_port, &capable); 78 } else { 79 if 
(!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv)) 80 capable = true; 81 } 82 83 return capable; 84 } 85 86 /* Is HDCP2.2 capable on Platform and Sink */ 87 bool intel_hdcp2_capable(struct intel_connector *connector) 88 { 89 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 90 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 91 struct intel_hdcp *hdcp = &connector->hdcp; 92 bool capable = false; 93 94 /* I915 support for HDCP2.2 */ 95 if (!hdcp->hdcp2_supported) 96 return false; 97 98 /* MEI interface is solid */ 99 mutex_lock(&dev_priv->hdcp_comp_mutex); 100 if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) { 101 mutex_unlock(&dev_priv->hdcp_comp_mutex); 102 return false; 103 } 104 mutex_unlock(&dev_priv->hdcp_comp_mutex); 105 106 /* Sink's capability for HDCP2.2 */ 107 hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable); 108 109 return capable; 110 } 111 112 static inline 113 bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, 114 enum transcoder cpu_transcoder, enum port port) 115 { 116 return intel_de_read(dev_priv, 117 HDCP_STATUS(dev_priv, cpu_transcoder, port)) & 118 HDCP_STATUS_ENC; 119 } 120 121 static inline 122 bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, 123 enum transcoder cpu_transcoder, enum port port) 124 { 125 return intel_de_read(dev_priv, 126 HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & 127 LINK_ENCRYPTION_STATUS; 128 } 129 130 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, 131 const struct intel_hdcp_shim *shim) 132 { 133 int ret, read_ret; 134 bool ksv_ready; 135 136 /* Poll for ksv list ready (spec says max time allowed is 5s) */ 137 ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port, 138 &ksv_ready), 139 read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 140 100 * 1000); 141 if (ret) 142 return ret; 143 if (read_ret) 144 return read_ret; 145 if (!ksv_ready) 146 return -ETIMEDOUT; 147 148 return 0; 149 } 150 151 
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv) 152 { 153 struct i915_power_domains *power_domains = &dev_priv->power_domains; 154 struct i915_power_well *power_well; 155 enum i915_power_well_id id; 156 bool enabled = false; 157 158 /* 159 * On HSW and BDW, Display HW loads the Key as soon as Display resumes. 160 * On all BXT+, SW can load the keys only when the PW#1 is turned on. 161 */ 162 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 163 id = HSW_DISP_PW_GLOBAL; 164 else 165 id = SKL_DISP_PW_1; 166 167 mutex_lock(&power_domains->lock); 168 169 /* PG1 (power well #1) needs to be enabled */ 170 for_each_power_well(dev_priv, power_well) { 171 if (power_well->desc->id == id) { 172 enabled = power_well->desc->ops->is_enabled(dev_priv, 173 power_well); 174 break; 175 } 176 } 177 mutex_unlock(&power_domains->lock); 178 179 /* 180 * Another req for hdcp key loadability is enabled state of pll for 181 * cdclk. Without active crtc we wont land here. So we are assuming that 182 * cdclk is already on. 183 */ 184 185 return enabled; 186 } 187 188 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv) 189 { 190 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER); 191 intel_de_write(dev_priv, HDCP_KEY_STATUS, 192 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE); 193 } 194 195 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) 196 { 197 int ret; 198 u32 val; 199 200 val = intel_de_read(dev_priv, HDCP_KEY_STATUS); 201 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS)) 202 return 0; 203 204 /* 205 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes 206 * out of reset. So if Key is not already loaded, its an error state. 207 */ 208 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 209 if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE)) 210 return -ENXIO; 211 212 /* 213 * Initiate loading the HDCP key from fuses. 
214 * 215 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9 216 * platforms except BXT and GLK, differ in the key load trigger process 217 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f. 218 */ 219 if (IS_GEN9_BC(dev_priv)) { 220 ret = sandybridge_pcode_write(dev_priv, 221 SKL_PCODE_LOAD_HDCP_KEYS, 1); 222 if (ret) { 223 drm_err(&dev_priv->drm, 224 "Failed to initiate HDCP key load (%d)\n", 225 ret); 226 return ret; 227 } 228 } else { 229 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER); 230 } 231 232 /* Wait for the keys to load (500us) */ 233 ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS, 234 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 235 10, 1, &val); 236 if (ret) 237 return ret; 238 else if (!(val & HDCP_KEY_LOAD_STATUS)) 239 return -ENXIO; 240 241 /* Send Aksv over to PCH display for use in authentication */ 242 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER); 243 244 return 0; 245 } 246 247 /* Returns updated SHA-1 index */ 248 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) 249 { 250 intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text); 251 if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) { 252 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n"); 253 return -ETIMEDOUT; 254 } 255 return 0; 256 } 257 258 static 259 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, 260 enum transcoder cpu_transcoder, enum port port) 261 { 262 if (INTEL_GEN(dev_priv) >= 12) { 263 switch (cpu_transcoder) { 264 case TRANSCODER_A: 265 return HDCP_TRANSA_REP_PRESENT | 266 HDCP_TRANSA_SHA1_M0; 267 case TRANSCODER_B: 268 return HDCP_TRANSB_REP_PRESENT | 269 HDCP_TRANSB_SHA1_M0; 270 case TRANSCODER_C: 271 return HDCP_TRANSC_REP_PRESENT | 272 HDCP_TRANSC_SHA1_M0; 273 case TRANSCODER_D: 274 return HDCP_TRANSD_REP_PRESENT | 275 HDCP_TRANSD_SHA1_M0; 276 default: 277 drm_err(&dev_priv->drm, "Unknown transcoder %d\n", 278 
cpu_transcoder); 279 return -EINVAL; 280 } 281 } 282 283 switch (port) { 284 case PORT_A: 285 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; 286 case PORT_B: 287 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0; 288 case PORT_C: 289 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0; 290 case PORT_D: 291 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0; 292 case PORT_E: 293 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; 294 default: 295 drm_err(&dev_priv->drm, "Unknown port %d\n", port); 296 return -EINVAL; 297 } 298 } 299 300 static 301 int intel_hdcp_validate_v_prime(struct intel_connector *connector, 302 const struct intel_hdcp_shim *shim, 303 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) 304 { 305 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 306 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 307 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 308 enum port port = intel_dig_port->base.port; 309 u32 vprime, sha_text, sha_leftovers, rep_ctl; 310 int ret, i, j, sha_idx; 311 312 /* Process V' values from the receiver */ 313 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { 314 ret = shim->read_v_prime_part(intel_dig_port, i, &vprime); 315 if (ret) 316 return ret; 317 intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime); 318 } 319 320 /* 321 * We need to write the concatenation of all device KSVs, BINFO (DP) || 322 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte 323 * stream is written via the HDCP_SHA_TEXT register in 32-bit 324 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This 325 * index will keep track of our progress through the 64 bytes as well as 326 * helping us work the 40-bit KSVs through our 32-bit register. 
327 * 328 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian 329 */ 330 sha_idx = 0; 331 sha_text = 0; 332 sha_leftovers = 0; 333 rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); 334 intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 335 for (i = 0; i < num_downstream; i++) { 336 unsigned int sha_empty; 337 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN]; 338 339 /* Fill up the empty slots in sha_text and write it out */ 340 sha_empty = sizeof(sha_text) - sha_leftovers; 341 for (j = 0; j < sha_empty; j++) 342 sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8); 343 344 ret = intel_write_sha_text(dev_priv, sha_text); 345 if (ret < 0) 346 return ret; 347 348 /* Programming guide writes this every 64 bytes */ 349 sha_idx += sizeof(sha_text); 350 if (!(sha_idx % 64)) 351 intel_de_write(dev_priv, HDCP_REP_CTL, 352 rep_ctl | HDCP_SHA1_TEXT_32); 353 354 /* Store the leftover bytes from the ksv in sha_text */ 355 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty; 356 sha_text = 0; 357 for (j = 0; j < sha_leftovers; j++) 358 sha_text |= ksv[sha_empty + j] << 359 ((sizeof(sha_text) - j - 1) * 8); 360 361 /* 362 * If we still have room in sha_text for more data, continue. 363 * Otherwise, write it out immediately. 364 */ 365 if (sizeof(sha_text) > sha_leftovers) 366 continue; 367 368 ret = intel_write_sha_text(dev_priv, sha_text); 369 if (ret < 0) 370 return ret; 371 sha_leftovers = 0; 372 sha_text = 0; 373 sha_idx += sizeof(sha_text); 374 } 375 376 /* 377 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many 378 * bytes are leftover from the last ksv, we might be able to fit them 379 * all in sha_text (first 2 cases), or we might need to split them up 380 * into 2 writes (last 2 cases). 
381 */ 382 if (sha_leftovers == 0) { 383 /* Write 16 bits of text, 16 bits of M0 */ 384 intel_de_write(dev_priv, HDCP_REP_CTL, 385 rep_ctl | HDCP_SHA1_TEXT_16); 386 ret = intel_write_sha_text(dev_priv, 387 bstatus[0] << 8 | bstatus[1]); 388 if (ret < 0) 389 return ret; 390 sha_idx += sizeof(sha_text); 391 392 /* Write 32 bits of M0 */ 393 intel_de_write(dev_priv, HDCP_REP_CTL, 394 rep_ctl | HDCP_SHA1_TEXT_0); 395 ret = intel_write_sha_text(dev_priv, 0); 396 if (ret < 0) 397 return ret; 398 sha_idx += sizeof(sha_text); 399 400 /* Write 16 bits of M0 */ 401 intel_de_write(dev_priv, HDCP_REP_CTL, 402 rep_ctl | HDCP_SHA1_TEXT_16); 403 ret = intel_write_sha_text(dev_priv, 0); 404 if (ret < 0) 405 return ret; 406 sha_idx += sizeof(sha_text); 407 408 } else if (sha_leftovers == 1) { 409 /* Write 24 bits of text, 8 bits of M0 */ 410 intel_de_write(dev_priv, HDCP_REP_CTL, 411 rep_ctl | HDCP_SHA1_TEXT_24); 412 sha_text |= bstatus[0] << 16 | bstatus[1] << 8; 413 /* Only 24-bits of data, must be in the LSB */ 414 sha_text = (sha_text & 0xffffff00) >> 8; 415 ret = intel_write_sha_text(dev_priv, sha_text); 416 if (ret < 0) 417 return ret; 418 sha_idx += sizeof(sha_text); 419 420 /* Write 32 bits of M0 */ 421 intel_de_write(dev_priv, HDCP_REP_CTL, 422 rep_ctl | HDCP_SHA1_TEXT_0); 423 ret = intel_write_sha_text(dev_priv, 0); 424 if (ret < 0) 425 return ret; 426 sha_idx += sizeof(sha_text); 427 428 /* Write 24 bits of M0 */ 429 intel_de_write(dev_priv, HDCP_REP_CTL, 430 rep_ctl | HDCP_SHA1_TEXT_8); 431 ret = intel_write_sha_text(dev_priv, 0); 432 if (ret < 0) 433 return ret; 434 sha_idx += sizeof(sha_text); 435 436 } else if (sha_leftovers == 2) { 437 /* Write 32 bits of text */ 438 intel_de_write(dev_priv, HDCP_REP_CTL, 439 rep_ctl | HDCP_SHA1_TEXT_32); 440 sha_text |= bstatus[0] << 24 | bstatus[1] << 16; 441 ret = intel_write_sha_text(dev_priv, sha_text); 442 if (ret < 0) 443 return ret; 444 sha_idx += sizeof(sha_text); 445 446 /* Write 64 bits of M0 */ 447 
intel_de_write(dev_priv, HDCP_REP_CTL, 448 rep_ctl | HDCP_SHA1_TEXT_0); 449 for (i = 0; i < 2; i++) { 450 ret = intel_write_sha_text(dev_priv, 0); 451 if (ret < 0) 452 return ret; 453 sha_idx += sizeof(sha_text); 454 } 455 } else if (sha_leftovers == 3) { 456 /* Write 32 bits of text */ 457 intel_de_write(dev_priv, HDCP_REP_CTL, 458 rep_ctl | HDCP_SHA1_TEXT_32); 459 sha_text |= bstatus[0] << 24; 460 ret = intel_write_sha_text(dev_priv, sha_text); 461 if (ret < 0) 462 return ret; 463 sha_idx += sizeof(sha_text); 464 465 /* Write 8 bits of text, 24 bits of M0 */ 466 intel_de_write(dev_priv, HDCP_REP_CTL, 467 rep_ctl | HDCP_SHA1_TEXT_8); 468 ret = intel_write_sha_text(dev_priv, bstatus[1]); 469 if (ret < 0) 470 return ret; 471 sha_idx += sizeof(sha_text); 472 473 /* Write 32 bits of M0 */ 474 intel_de_write(dev_priv, HDCP_REP_CTL, 475 rep_ctl | HDCP_SHA1_TEXT_0); 476 ret = intel_write_sha_text(dev_priv, 0); 477 if (ret < 0) 478 return ret; 479 sha_idx += sizeof(sha_text); 480 481 /* Write 8 bits of M0 */ 482 intel_de_write(dev_priv, HDCP_REP_CTL, 483 rep_ctl | HDCP_SHA1_TEXT_24); 484 ret = intel_write_sha_text(dev_priv, 0); 485 if (ret < 0) 486 return ret; 487 sha_idx += sizeof(sha_text); 488 } else { 489 drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n", 490 sha_leftovers); 491 return -EINVAL; 492 } 493 494 intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); 495 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */ 496 while ((sha_idx % 64) < (64 - sizeof(sha_text))) { 497 ret = intel_write_sha_text(dev_priv, 0); 498 if (ret < 0) 499 return ret; 500 sha_idx += sizeof(sha_text); 501 } 502 503 /* 504 * Last write gets the length of the concatenation in bits. 
That is: 505 * - 5 bytes per device 506 * - 10 bytes for BINFO/BSTATUS(2), M0(8) 507 */ 508 sha_text = (num_downstream * 5 + 10) * 8; 509 ret = intel_write_sha_text(dev_priv, sha_text); 510 if (ret < 0) 511 return ret; 512 513 /* Tell the HW we're done with the hash and wait for it to ACK */ 514 intel_de_write(dev_priv, HDCP_REP_CTL, 515 rep_ctl | HDCP_SHA1_COMPLETE_HASH); 516 if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, 517 HDCP_SHA1_COMPLETE, 1)) { 518 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n"); 519 return -ETIMEDOUT; 520 } 521 if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { 522 drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n"); 523 return -ENXIO; 524 } 525 526 return 0; 527 } 528 529 /* Implements Part 2 of the HDCP authorization procedure */ 530 static 531 int intel_hdcp_auth_downstream(struct intel_connector *connector) 532 { 533 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 534 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 535 const struct intel_hdcp_shim *shim = connector->hdcp.shim; 536 u8 bstatus[2], num_downstream, *ksv_fifo; 537 int ret, i, tries = 3; 538 539 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); 540 if (ret) { 541 drm_dbg_kms(&dev_priv->drm, 542 "KSV list failed to become ready (%d)\n", ret); 543 return ret; 544 } 545 546 ret = shim->read_bstatus(intel_dig_port, bstatus); 547 if (ret) 548 return ret; 549 550 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || 551 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { 552 drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n"); 553 return -EPERM; 554 } 555 556 /* 557 * When repeater reports 0 device count, HDCP1.4 spec allows disabling 558 * the HDCP encryption. That implies that repeater can't have its own 559 * display. As there is no consumption of encrypted content in the 560 * repeater with 0 downstream devices, we are failing the 561 * authentication. 
562 */ 563 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); 564 if (num_downstream == 0) { 565 drm_dbg_kms(&dev_priv->drm, 566 "Repeater with zero downstream devices\n"); 567 return -EINVAL; 568 } 569 570 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); 571 if (!ksv_fifo) { 572 drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n"); 573 return -ENOMEM; 574 } 575 576 ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); 577 if (ret) 578 goto err; 579 580 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo, 581 num_downstream)) { 582 drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n"); 583 ret = -EPERM; 584 goto err; 585 } 586 587 /* 588 * When V prime mismatches, DP Spec mandates re-read of 589 * V prime atleast twice. 590 */ 591 for (i = 0; i < tries; i++) { 592 ret = intel_hdcp_validate_v_prime(connector, shim, 593 ksv_fifo, num_downstream, 594 bstatus); 595 if (!ret) 596 break; 597 } 598 599 if (i == tries) { 600 drm_dbg_kms(&dev_priv->drm, 601 "V Prime validation failed.(%d)\n", ret); 602 goto err; 603 } 604 605 drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n", 606 num_downstream); 607 ret = 0; 608 err: 609 kfree(ksv_fifo); 610 return ret; 611 } 612 613 /* Implements Part 1 of the HDCP authorization procedure */ 614 static int intel_hdcp_auth(struct intel_connector *connector) 615 { 616 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 617 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 618 struct intel_hdcp *hdcp = &connector->hdcp; 619 const struct intel_hdcp_shim *shim = hdcp->shim; 620 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; 621 enum port port = intel_dig_port->base.port; 622 unsigned long r0_prime_gen_start; 623 int ret, i, tries = 2; 624 union { 625 u32 reg[2]; 626 u8 shim[DRM_HDCP_AN_LEN]; 627 } an; 628 union { 629 u32 reg[2]; 630 u8 shim[DRM_HDCP_KSV_LEN]; 631 } bksv; 632 union { 633 u32 reg; 634 u8 
shim[DRM_HDCP_RI_LEN]; 635 } ri; 636 bool repeater_present, hdcp_capable; 637 638 /* 639 * Detects whether the display is HDCP capable. Although we check for 640 * valid Bksv below, the HDCP over DP spec requires that we check 641 * whether the display supports HDCP before we write An. For HDMI 642 * displays, this is not necessary. 643 */ 644 if (shim->hdcp_capable) { 645 ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable); 646 if (ret) 647 return ret; 648 if (!hdcp_capable) { 649 drm_dbg_kms(&dev_priv->drm, 650 "Panel is not HDCP capable\n"); 651 return -EINVAL; 652 } 653 } 654 655 /* Initialize An with 2 random values and acquire it */ 656 for (i = 0; i < 2; i++) 657 intel_de_write(dev_priv, 658 HDCP_ANINIT(dev_priv, cpu_transcoder, port), 659 get_random_u32()); 660 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 661 HDCP_CONF_CAPTURE_AN); 662 663 /* Wait for An to be acquired */ 664 if (intel_de_wait_for_set(dev_priv, 665 HDCP_STATUS(dev_priv, cpu_transcoder, port), 666 HDCP_STATUS_AN_READY, 1)) { 667 drm_err(&dev_priv->drm, "Timed out waiting for An\n"); 668 return -ETIMEDOUT; 669 } 670 671 an.reg[0] = intel_de_read(dev_priv, 672 HDCP_ANLO(dev_priv, cpu_transcoder, port)); 673 an.reg[1] = intel_de_read(dev_priv, 674 HDCP_ANHI(dev_priv, cpu_transcoder, port)); 675 ret = shim->write_an_aksv(intel_dig_port, an.shim); 676 if (ret) 677 return ret; 678 679 r0_prime_gen_start = jiffies; 680 681 memset(&bksv, 0, sizeof(bksv)); 682 683 ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim); 684 if (ret < 0) 685 return ret; 686 687 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) { 688 drm_err(&dev_priv->drm, "BKSV is revoked\n"); 689 return -EPERM; 690 } 691 692 intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port), 693 bksv.reg[0]); 694 intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port), 695 bksv.reg[1]); 696 697 ret = shim->repeater_present(intel_dig_port, &repeater_present); 698 if 
(ret) 699 return ret; 700 if (repeater_present) 701 intel_de_write(dev_priv, HDCP_REP_CTL, 702 intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port)); 703 704 ret = shim->toggle_signalling(intel_dig_port, true); 705 if (ret) 706 return ret; 707 708 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 709 HDCP_CONF_AUTH_AND_ENC); 710 711 /* Wait for R0 ready */ 712 if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & 713 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { 714 drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n"); 715 return -ETIMEDOUT; 716 } 717 718 /* 719 * Wait for R0' to become available. The spec says 100ms from Aksv, but 720 * some monitors can take longer than this. We'll set the timeout at 721 * 300ms just to be sure. 722 * 723 * On DP, there's an R0_READY bit available but no such bit 724 * exists on HDMI. Since the upper-bound is the same, we'll just do 725 * the stupid thing instead of polling on one and not the other. 726 */ 727 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300); 728 729 tries = 3; 730 731 /* 732 * DP HDCP Spec mandates the two more reattempt to read R0, incase 733 * of R0 mismatch. 
734 */ 735 for (i = 0; i < tries; i++) { 736 ri.reg = 0; 737 ret = shim->read_ri_prime(intel_dig_port, ri.shim); 738 if (ret) 739 return ret; 740 intel_de_write(dev_priv, 741 HDCP_RPRIME(dev_priv, cpu_transcoder, port), 742 ri.reg); 743 744 /* Wait for Ri prime match */ 745 if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) & 746 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) 747 break; 748 } 749 750 if (i == tries) { 751 drm_dbg_kms(&dev_priv->drm, 752 "Timed out waiting for Ri prime match (%x)\n", 753 intel_de_read(dev_priv, HDCP_STATUS(dev_priv, 754 cpu_transcoder, port))); 755 return -ETIMEDOUT; 756 } 757 758 /* Wait for encryption confirmation */ 759 if (intel_de_wait_for_set(dev_priv, 760 HDCP_STATUS(dev_priv, cpu_transcoder, port), 761 HDCP_STATUS_ENC, 762 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 763 drm_err(&dev_priv->drm, "Timed out waiting for encryption\n"); 764 return -ETIMEDOUT; 765 } 766 767 /* 768 * XXX: If we have MST-connected devices, we need to enable encryption 769 * on those as well. 
770 */ 771 772 if (repeater_present) 773 return intel_hdcp_auth_downstream(connector); 774 775 drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n"); 776 return 0; 777 } 778 779 static int _intel_hdcp_disable(struct intel_connector *connector) 780 { 781 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 782 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 783 struct intel_hdcp *hdcp = &connector->hdcp; 784 enum port port = intel_dig_port->base.port; 785 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 786 int ret; 787 788 drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n", 789 connector->base.name, connector->base.base.id); 790 791 hdcp->hdcp_encrypted = false; 792 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0); 793 if (intel_de_wait_for_clear(dev_priv, 794 HDCP_STATUS(dev_priv, cpu_transcoder, port), 795 ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { 796 drm_err(&dev_priv->drm, 797 "Failed to disable HDCP, timeout clearing status\n"); 798 return -ETIMEDOUT; 799 } 800 801 ret = hdcp->shim->toggle_signalling(intel_dig_port, false); 802 if (ret) { 803 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); 804 return ret; 805 } 806 807 drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n"); 808 return 0; 809 } 810 811 static int _intel_hdcp_enable(struct intel_connector *connector) 812 { 813 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 814 struct intel_hdcp *hdcp = &connector->hdcp; 815 int i, ret, tries = 3; 816 817 drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n", 818 connector->base.name, connector->base.base.id); 819 820 if (!hdcp_key_loadable(dev_priv)) { 821 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n"); 822 return -ENXIO; 823 } 824 825 for (i = 0; i < KEY_LOAD_TRIES; i++) { 826 ret = intel_hdcp_load_keys(dev_priv); 827 if (!ret) 828 break; 829 intel_hdcp_clear_keys(dev_priv); 830 } 831 if (ret) { 832 
drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n", 833 ret); 834 return ret; 835 } 836 837 /* Incase of authentication failures, HDCP spec expects reauth. */ 838 for (i = 0; i < tries; i++) { 839 ret = intel_hdcp_auth(connector); 840 if (!ret) { 841 hdcp->hdcp_encrypted = true; 842 return 0; 843 } 844 845 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret); 846 847 /* Ensuring HDCP encryption and signalling are stopped. */ 848 _intel_hdcp_disable(connector); 849 } 850 851 drm_dbg_kms(&dev_priv->drm, 852 "HDCP authentication failed (%d tries/%d)\n", tries, ret); 853 return ret; 854 } 855 856 static inline 857 struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) 858 { 859 return container_of(hdcp, struct intel_connector, hdcp); 860 } 861 862 /* Implements Part 3 of the HDCP authorization procedure */ 863 static int intel_hdcp_check_link(struct intel_connector *connector) 864 { 865 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 866 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 867 struct intel_hdcp *hdcp = &connector->hdcp; 868 enum port port = intel_dig_port->base.port; 869 enum transcoder cpu_transcoder; 870 int ret = 0; 871 872 mutex_lock(&hdcp->mutex); 873 cpu_transcoder = hdcp->cpu_transcoder; 874 875 /* Check_link valid only when HDCP1.4 is enabled */ 876 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 877 !hdcp->hdcp_encrypted) { 878 ret = -EINVAL; 879 goto out; 880 } 881 882 if (drm_WARN_ON(&dev_priv->drm, 883 !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) { 884 drm_err(&dev_priv->drm, 885 "%s:%d HDCP link stopped encryption,%x\n", 886 connector->base.name, connector->base.base.id, 887 intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port))); 888 ret = -ENXIO; 889 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 890 schedule_work(&hdcp->prop_work); 891 goto out; 892 } 893 894 if (hdcp->shim->check_link(intel_dig_port)) { 895 if (hdcp->value 
!= DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 896 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 897 schedule_work(&hdcp->prop_work); 898 } 899 goto out; 900 } 901 902 drm_dbg_kms(&dev_priv->drm, 903 "[%s:%d] HDCP link failed, retrying authentication\n", 904 connector->base.name, connector->base.base.id); 905 906 ret = _intel_hdcp_disable(connector); 907 if (ret) { 908 drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret); 909 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 910 schedule_work(&hdcp->prop_work); 911 goto out; 912 } 913 914 ret = _intel_hdcp_enable(connector); 915 if (ret) { 916 drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret); 917 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 918 schedule_work(&hdcp->prop_work); 919 goto out; 920 } 921 922 out: 923 mutex_unlock(&hdcp->mutex); 924 return ret; 925 } 926 927 static void intel_hdcp_prop_work(struct work_struct *work) 928 { 929 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, 930 prop_work); 931 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 932 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 933 934 drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL); 935 mutex_lock(&hdcp->mutex); 936 937 /* 938 * This worker is only used to flip between ENABLED/DESIRED. Either of 939 * those to UNDESIRED is handled by core. 
If value == UNDESIRED, 940 * we're running just after hdcp has been disabled, so just exit 941 */ 942 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 943 drm_hdcp_update_content_protection(&connector->base, 944 hdcp->value); 945 946 mutex_unlock(&hdcp->mutex); 947 drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex); 948 } 949 950 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) 951 { 952 return INTEL_INFO(dev_priv)->display.has_hdcp && 953 (INTEL_GEN(dev_priv) >= 12 || port < PORT_E); 954 } 955 956 static int 957 hdcp2_prepare_ake_init(struct intel_connector *connector, 958 struct hdcp2_ake_init *ake_data) 959 { 960 struct hdcp_port_data *data = &connector->hdcp.port_data; 961 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 962 struct i915_hdcp_comp_master *comp; 963 int ret; 964 965 mutex_lock(&dev_priv->hdcp_comp_mutex); 966 comp = dev_priv->hdcp_master; 967 968 if (!comp || !comp->ops) { 969 mutex_unlock(&dev_priv->hdcp_comp_mutex); 970 return -EINVAL; 971 } 972 973 ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); 974 if (ret) 975 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. 
%d\n", 976 ret); 977 mutex_unlock(&dev_priv->hdcp_comp_mutex); 978 979 return ret; 980 } 981 982 static int 983 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, 984 struct hdcp2_ake_send_cert *rx_cert, 985 bool *paired, 986 struct hdcp2_ake_no_stored_km *ek_pub_km, 987 size_t *msg_sz) 988 { 989 struct hdcp_port_data *data = &connector->hdcp.port_data; 990 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 991 struct i915_hdcp_comp_master *comp; 992 int ret; 993 994 mutex_lock(&dev_priv->hdcp_comp_mutex); 995 comp = dev_priv->hdcp_master; 996 997 if (!comp || !comp->ops) { 998 mutex_unlock(&dev_priv->hdcp_comp_mutex); 999 return -EINVAL; 1000 } 1001 1002 ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data, 1003 rx_cert, paired, 1004 ek_pub_km, msg_sz); 1005 if (ret < 0) 1006 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n", 1007 ret); 1008 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1009 1010 return ret; 1011 } 1012 1013 static int hdcp2_verify_hprime(struct intel_connector *connector, 1014 struct hdcp2_ake_send_hprime *rx_hprime) 1015 { 1016 struct hdcp_port_data *data = &connector->hdcp.port_data; 1017 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1018 struct i915_hdcp_comp_master *comp; 1019 int ret; 1020 1021 mutex_lock(&dev_priv->hdcp_comp_mutex); 1022 comp = dev_priv->hdcp_master; 1023 1024 if (!comp || !comp->ops) { 1025 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1026 return -EINVAL; 1027 } 1028 1029 ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); 1030 if (ret < 0) 1031 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. 
%d\n", ret); 1032 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1033 1034 return ret; 1035 } 1036 1037 static int 1038 hdcp2_store_pairing_info(struct intel_connector *connector, 1039 struct hdcp2_ake_send_pairing_info *pairing_info) 1040 { 1041 struct hdcp_port_data *data = &connector->hdcp.port_data; 1042 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1043 struct i915_hdcp_comp_master *comp; 1044 int ret; 1045 1046 mutex_lock(&dev_priv->hdcp_comp_mutex); 1047 comp = dev_priv->hdcp_master; 1048 1049 if (!comp || !comp->ops) { 1050 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1051 return -EINVAL; 1052 } 1053 1054 ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); 1055 if (ret < 0) 1056 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", 1057 ret); 1058 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1059 1060 return ret; 1061 } 1062 1063 static int 1064 hdcp2_prepare_lc_init(struct intel_connector *connector, 1065 struct hdcp2_lc_init *lc_init) 1066 { 1067 struct hdcp_port_data *data = &connector->hdcp.port_data; 1068 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1069 struct i915_hdcp_comp_master *comp; 1070 int ret; 1071 1072 mutex_lock(&dev_priv->hdcp_comp_mutex); 1073 comp = dev_priv->hdcp_master; 1074 1075 if (!comp || !comp->ops) { 1076 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1077 return -EINVAL; 1078 } 1079 1080 ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); 1081 if (ret < 0) 1082 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. 
%d\n", 1083 ret); 1084 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1085 1086 return ret; 1087 } 1088 1089 static int 1090 hdcp2_verify_lprime(struct intel_connector *connector, 1091 struct hdcp2_lc_send_lprime *rx_lprime) 1092 { 1093 struct hdcp_port_data *data = &connector->hdcp.port_data; 1094 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1095 struct i915_hdcp_comp_master *comp; 1096 int ret; 1097 1098 mutex_lock(&dev_priv->hdcp_comp_mutex); 1099 comp = dev_priv->hdcp_master; 1100 1101 if (!comp || !comp->ops) { 1102 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1103 return -EINVAL; 1104 } 1105 1106 ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); 1107 if (ret < 0) 1108 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", 1109 ret); 1110 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1111 1112 return ret; 1113 } 1114 1115 static int hdcp2_prepare_skey(struct intel_connector *connector, 1116 struct hdcp2_ske_send_eks *ske_data) 1117 { 1118 struct hdcp_port_data *data = &connector->hdcp.port_data; 1119 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1120 struct i915_hdcp_comp_master *comp; 1121 int ret; 1122 1123 mutex_lock(&dev_priv->hdcp_comp_mutex); 1124 comp = dev_priv->hdcp_master; 1125 1126 if (!comp || !comp->ops) { 1127 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1128 return -EINVAL; 1129 } 1130 1131 ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); 1132 if (ret < 0) 1133 drm_dbg_kms(&dev_priv->drm, "Get session key failed. 
%d\n", 1134 ret); 1135 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1136 1137 return ret; 1138 } 1139 1140 static int 1141 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, 1142 struct hdcp2_rep_send_receiverid_list 1143 *rep_topology, 1144 struct hdcp2_rep_send_ack *rep_send_ack) 1145 { 1146 struct hdcp_port_data *data = &connector->hdcp.port_data; 1147 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1148 struct i915_hdcp_comp_master *comp; 1149 int ret; 1150 1151 mutex_lock(&dev_priv->hdcp_comp_mutex); 1152 comp = dev_priv->hdcp_master; 1153 1154 if (!comp || !comp->ops) { 1155 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1156 return -EINVAL; 1157 } 1158 1159 ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data, 1160 rep_topology, 1161 rep_send_ack); 1162 if (ret < 0) 1163 drm_dbg_kms(&dev_priv->drm, 1164 "Verify rep topology failed. %d\n", ret); 1165 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1166 1167 return ret; 1168 } 1169 1170 static int 1171 hdcp2_verify_mprime(struct intel_connector *connector, 1172 struct hdcp2_rep_stream_ready *stream_ready) 1173 { 1174 struct hdcp_port_data *data = &connector->hdcp.port_data; 1175 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1176 struct i915_hdcp_comp_master *comp; 1177 int ret; 1178 1179 mutex_lock(&dev_priv->hdcp_comp_mutex); 1180 comp = dev_priv->hdcp_master; 1181 1182 if (!comp || !comp->ops) { 1183 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1184 return -EINVAL; 1185 } 1186 1187 ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); 1188 if (ret < 0) 1189 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. 
%d\n", ret); 1190 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1191 1192 return ret; 1193 } 1194 1195 static int hdcp2_authenticate_port(struct intel_connector *connector) 1196 { 1197 struct hdcp_port_data *data = &connector->hdcp.port_data; 1198 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1199 struct i915_hdcp_comp_master *comp; 1200 int ret; 1201 1202 mutex_lock(&dev_priv->hdcp_comp_mutex); 1203 comp = dev_priv->hdcp_master; 1204 1205 if (!comp || !comp->ops) { 1206 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1207 return -EINVAL; 1208 } 1209 1210 ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); 1211 if (ret < 0) 1212 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n", 1213 ret); 1214 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1215 1216 return ret; 1217 } 1218 1219 static int hdcp2_close_mei_session(struct intel_connector *connector) 1220 { 1221 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1222 struct i915_hdcp_comp_master *comp; 1223 int ret; 1224 1225 mutex_lock(&dev_priv->hdcp_comp_mutex); 1226 comp = dev_priv->hdcp_master; 1227 1228 if (!comp || !comp->ops) { 1229 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1230 return -EINVAL; 1231 } 1232 1233 ret = comp->ops->close_hdcp_session(comp->mei_dev, 1234 &connector->hdcp.port_data); 1235 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1236 1237 return ret; 1238 } 1239 1240 static int hdcp2_deauthenticate_port(struct intel_connector *connector) 1241 { 1242 return hdcp2_close_mei_session(connector); 1243 } 1244 1245 /* Authentication flow starts from here */ 1246 static int hdcp2_authentication_key_exchange(struct intel_connector *connector) 1247 { 1248 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1249 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1250 struct intel_hdcp *hdcp = &connector->hdcp; 1251 union { 1252 struct hdcp2_ake_init ake_init; 1253 struct hdcp2_ake_send_cert send_cert; 1254 struct 
hdcp2_ake_no_stored_km no_stored_km; 1255 struct hdcp2_ake_send_hprime send_hprime; 1256 struct hdcp2_ake_send_pairing_info pairing_info; 1257 } msgs; 1258 const struct intel_hdcp_shim *shim = hdcp->shim; 1259 size_t size; 1260 int ret; 1261 1262 /* Init for seq_num */ 1263 hdcp->seq_num_v = 0; 1264 hdcp->seq_num_m = 0; 1265 1266 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); 1267 if (ret < 0) 1268 return ret; 1269 1270 ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init, 1271 sizeof(msgs.ake_init)); 1272 if (ret < 0) 1273 return ret; 1274 1275 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT, 1276 &msgs.send_cert, sizeof(msgs.send_cert)); 1277 if (ret < 0) 1278 return ret; 1279 1280 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { 1281 drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n"); 1282 return -EINVAL; 1283 } 1284 1285 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); 1286 1287 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, 1288 msgs.send_cert.cert_rx.receiver_id, 1289 1)) { 1290 drm_err(&dev_priv->drm, "Receiver ID is revoked\n"); 1291 return -EPERM; 1292 } 1293 1294 /* 1295 * Here msgs.no_stored_km will hold msgs corresponding to the km 1296 * stored also. 
1297 */ 1298 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, 1299 &hdcp->is_paired, 1300 &msgs.no_stored_km, &size); 1301 if (ret < 0) 1302 return ret; 1303 1304 ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size); 1305 if (ret < 0) 1306 return ret; 1307 1308 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME, 1309 &msgs.send_hprime, sizeof(msgs.send_hprime)); 1310 if (ret < 0) 1311 return ret; 1312 1313 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); 1314 if (ret < 0) 1315 return ret; 1316 1317 if (!hdcp->is_paired) { 1318 /* Pairing is required */ 1319 ret = shim->read_2_2_msg(intel_dig_port, 1320 HDCP_2_2_AKE_SEND_PAIRING_INFO, 1321 &msgs.pairing_info, 1322 sizeof(msgs.pairing_info)); 1323 if (ret < 0) 1324 return ret; 1325 1326 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); 1327 if (ret < 0) 1328 return ret; 1329 hdcp->is_paired = true; 1330 } 1331 1332 return 0; 1333 } 1334 1335 static int hdcp2_locality_check(struct intel_connector *connector) 1336 { 1337 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1338 struct intel_hdcp *hdcp = &connector->hdcp; 1339 union { 1340 struct hdcp2_lc_init lc_init; 1341 struct hdcp2_lc_send_lprime send_lprime; 1342 } msgs; 1343 const struct intel_hdcp_shim *shim = hdcp->shim; 1344 int tries = HDCP2_LC_RETRY_CNT, ret, i; 1345 1346 for (i = 0; i < tries; i++) { 1347 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); 1348 if (ret < 0) 1349 continue; 1350 1351 ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init, 1352 sizeof(msgs.lc_init)); 1353 if (ret < 0) 1354 continue; 1355 1356 ret = shim->read_2_2_msg(intel_dig_port, 1357 HDCP_2_2_LC_SEND_LPRIME, 1358 &msgs.send_lprime, 1359 sizeof(msgs.send_lprime)); 1360 if (ret < 0) 1361 continue; 1362 1363 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); 1364 if (!ret) 1365 break; 1366 } 1367 1368 return ret; 1369 } 1370 1371 static int 
hdcp2_session_key_exchange(struct intel_connector *connector) 1372 { 1373 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1374 struct intel_hdcp *hdcp = &connector->hdcp; 1375 struct hdcp2_ske_send_eks send_eks; 1376 int ret; 1377 1378 ret = hdcp2_prepare_skey(connector, &send_eks); 1379 if (ret < 0) 1380 return ret; 1381 1382 ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks, 1383 sizeof(send_eks)); 1384 if (ret < 0) 1385 return ret; 1386 1387 return 0; 1388 } 1389 1390 static 1391 int hdcp2_propagate_stream_management_info(struct intel_connector *connector) 1392 { 1393 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1394 struct intel_hdcp *hdcp = &connector->hdcp; 1395 union { 1396 struct hdcp2_rep_stream_manage stream_manage; 1397 struct hdcp2_rep_stream_ready stream_ready; 1398 } msgs; 1399 const struct intel_hdcp_shim *shim = hdcp->shim; 1400 int ret; 1401 1402 /* Prepare RepeaterAuth_Stream_Manage msg */ 1403 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; 1404 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); 1405 1406 /* K no of streams is fixed as 1. Stored as big-endian. */ 1407 msgs.stream_manage.k = cpu_to_be16(1); 1408 1409 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. 
*/ 1410 msgs.stream_manage.streams[0].stream_id = 0; 1411 msgs.stream_manage.streams[0].stream_type = hdcp->content_type; 1412 1413 /* Send it to Repeater */ 1414 ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage, 1415 sizeof(msgs.stream_manage)); 1416 if (ret < 0) 1417 return ret; 1418 1419 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY, 1420 &msgs.stream_ready, sizeof(msgs.stream_ready)); 1421 if (ret < 0) 1422 return ret; 1423 1424 hdcp->port_data.seq_num_m = hdcp->seq_num_m; 1425 hdcp->port_data.streams[0].stream_type = hdcp->content_type; 1426 1427 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); 1428 if (ret < 0) 1429 return ret; 1430 1431 hdcp->seq_num_m++; 1432 1433 if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { 1434 DRM_DEBUG_KMS("seq_num_m roll over.\n"); 1435 return -1; 1436 } 1437 1438 return 0; 1439 } 1440 1441 static 1442 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) 1443 { 1444 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1445 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1446 struct intel_hdcp *hdcp = &connector->hdcp; 1447 union { 1448 struct hdcp2_rep_send_receiverid_list recvid_list; 1449 struct hdcp2_rep_send_ack rep_ack; 1450 } msgs; 1451 const struct intel_hdcp_shim *shim = hdcp->shim; 1452 u32 seq_num_v, device_cnt; 1453 u8 *rx_info; 1454 int ret; 1455 1456 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, 1457 &msgs.recvid_list, sizeof(msgs.recvid_list)); 1458 if (ret < 0) 1459 return ret; 1460 1461 rx_info = msgs.recvid_list.rx_info; 1462 1463 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || 1464 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { 1465 drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n"); 1466 return -EINVAL; 1467 } 1468 1469 /* Converting and Storing the seq_num_v to local variable as DWORD */ 1470 seq_num_v = 1471 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v); 1472 
1473 if (!hdcp->hdcp2_encrypted && seq_num_v) { 1474 drm_dbg_kms(&dev_priv->drm, 1475 "Non zero Seq_num_v at first RecvId_List msg\n"); 1476 return -EINVAL; 1477 } 1478 1479 if (seq_num_v < hdcp->seq_num_v) { 1480 /* Roll over of the seq_num_v from repeater. Reauthenticate. */ 1481 drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n"); 1482 return -EINVAL; 1483 } 1484 1485 device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | 1486 HDCP_2_2_DEV_COUNT_LO(rx_info[1])); 1487 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, 1488 msgs.recvid_list.receiver_ids, 1489 device_cnt)) { 1490 drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n"); 1491 return -EPERM; 1492 } 1493 1494 ret = hdcp2_verify_rep_topology_prepare_ack(connector, 1495 &msgs.recvid_list, 1496 &msgs.rep_ack); 1497 if (ret < 0) 1498 return ret; 1499 1500 hdcp->seq_num_v = seq_num_v; 1501 ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack, 1502 sizeof(msgs.rep_ack)); 1503 if (ret < 0) 1504 return ret; 1505 1506 return 0; 1507 } 1508 1509 static int hdcp2_authenticate_repeater(struct intel_connector *connector) 1510 { 1511 int ret; 1512 1513 ret = hdcp2_authenticate_repeater_topology(connector); 1514 if (ret < 0) 1515 return ret; 1516 1517 return hdcp2_propagate_stream_management_info(connector); 1518 } 1519 1520 static int hdcp2_authenticate_sink(struct intel_connector *connector) 1521 { 1522 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1523 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1524 struct intel_hdcp *hdcp = &connector->hdcp; 1525 const struct intel_hdcp_shim *shim = hdcp->shim; 1526 int ret; 1527 1528 ret = hdcp2_authentication_key_exchange(connector); 1529 if (ret < 0) { 1530 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret); 1531 return ret; 1532 } 1533 1534 ret = hdcp2_locality_check(connector); 1535 if (ret < 0) { 1536 drm_dbg_kms(&i915->drm, 1537 "Locality Check failed. 
Err : %d\n", ret); 1538 return ret; 1539 } 1540 1541 ret = hdcp2_session_key_exchange(connector); 1542 if (ret < 0) { 1543 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret); 1544 return ret; 1545 } 1546 1547 if (shim->config_stream_type) { 1548 ret = shim->config_stream_type(intel_dig_port, 1549 hdcp->is_repeater, 1550 hdcp->content_type); 1551 if (ret < 0) 1552 return ret; 1553 } 1554 1555 if (hdcp->is_repeater) { 1556 ret = hdcp2_authenticate_repeater(connector); 1557 if (ret < 0) { 1558 drm_dbg_kms(&i915->drm, 1559 "Repeater Auth Failed. Err: %d\n", ret); 1560 return ret; 1561 } 1562 } 1563 1564 hdcp->port_data.streams[0].stream_type = hdcp->content_type; 1565 ret = hdcp2_authenticate_port(connector); 1566 if (ret < 0) 1567 return ret; 1568 1569 return ret; 1570 } 1571 1572 static int hdcp2_enable_encryption(struct intel_connector *connector) 1573 { 1574 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1575 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1576 struct intel_hdcp *hdcp = &connector->hdcp; 1577 enum port port = intel_dig_port->base.port; 1578 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1579 int ret; 1580 1581 drm_WARN_ON(&dev_priv->drm, 1582 intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & 1583 LINK_ENCRYPTION_STATUS); 1584 if (hdcp->shim->toggle_signalling) { 1585 ret = hdcp->shim->toggle_signalling(intel_dig_port, true); 1586 if (ret) { 1587 drm_err(&dev_priv->drm, 1588 "Failed to enable HDCP signalling. %d\n", 1589 ret); 1590 return ret; 1591 } 1592 } 1593 1594 if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & 1595 LINK_AUTH_STATUS) { 1596 /* Link is Authenticated. 
Now set for Encryption */ 1597 intel_de_write(dev_priv, 1598 HDCP2_CTL(dev_priv, cpu_transcoder, port), 1599 intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ); 1600 } 1601 1602 ret = intel_de_wait_for_set(dev_priv, 1603 HDCP2_STATUS(dev_priv, cpu_transcoder, 1604 port), 1605 LINK_ENCRYPTION_STATUS, 1606 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1607 1608 return ret; 1609 } 1610 1611 static int hdcp2_disable_encryption(struct intel_connector *connector) 1612 { 1613 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1614 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1615 struct intel_hdcp *hdcp = &connector->hdcp; 1616 enum port port = intel_dig_port->base.port; 1617 enum transcoder cpu_transcoder = hdcp->cpu_transcoder; 1618 int ret; 1619 1620 drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & 1621 LINK_ENCRYPTION_STATUS)); 1622 1623 intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), 1624 intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ); 1625 1626 ret = intel_de_wait_for_clear(dev_priv, 1627 HDCP2_STATUS(dev_priv, cpu_transcoder, 1628 port), 1629 LINK_ENCRYPTION_STATUS, 1630 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); 1631 if (ret == -ETIMEDOUT) 1632 drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout"); 1633 1634 if (hdcp->shim->toggle_signalling) { 1635 ret = hdcp->shim->toggle_signalling(intel_dig_port, false); 1636 if (ret) { 1637 drm_err(&dev_priv->drm, 1638 "Failed to disable HDCP signalling. 
%d\n", 1639 ret); 1640 return ret; 1641 } 1642 } 1643 1644 return ret; 1645 } 1646 1647 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) 1648 { 1649 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1650 int ret, i, tries = 3; 1651 1652 for (i = 0; i < tries; i++) { 1653 ret = hdcp2_authenticate_sink(connector); 1654 if (!ret) 1655 break; 1656 1657 /* Clearing the mei hdcp session */ 1658 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n", 1659 i + 1, tries, ret); 1660 if (hdcp2_deauthenticate_port(connector) < 0) 1661 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1662 } 1663 1664 if (i != tries) { 1665 /* 1666 * Ensuring the required 200mSec min time interval between 1667 * Session Key Exchange and encryption. 1668 */ 1669 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); 1670 ret = hdcp2_enable_encryption(connector); 1671 if (ret < 0) { 1672 drm_dbg_kms(&i915->drm, 1673 "Encryption Enable Failed.(%d)\n", ret); 1674 if (hdcp2_deauthenticate_port(connector) < 0) 1675 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1676 } 1677 } 1678 1679 return ret; 1680 } 1681 1682 static int _intel_hdcp2_enable(struct intel_connector *connector) 1683 { 1684 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1685 struct intel_hdcp *hdcp = &connector->hdcp; 1686 int ret; 1687 1688 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n", 1689 connector->base.name, connector->base.base.id, 1690 hdcp->content_type); 1691 1692 ret = hdcp2_authenticate_and_encrypt(connector); 1693 if (ret) { 1694 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n", 1695 hdcp->content_type, ret); 1696 return ret; 1697 } 1698 1699 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. 
Type %d\n", 1700 connector->base.name, connector->base.base.id, 1701 hdcp->content_type); 1702 1703 hdcp->hdcp2_encrypted = true; 1704 return 0; 1705 } 1706 1707 static int _intel_hdcp2_disable(struct intel_connector *connector) 1708 { 1709 struct drm_i915_private *i915 = to_i915(connector->base.dev); 1710 int ret; 1711 1712 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n", 1713 connector->base.name, connector->base.base.id); 1714 1715 ret = hdcp2_disable_encryption(connector); 1716 1717 if (hdcp2_deauthenticate_port(connector) < 0) 1718 drm_dbg_kms(&i915->drm, "Port deauth failed.\n"); 1719 1720 connector->hdcp.hdcp2_encrypted = false; 1721 1722 return ret; 1723 } 1724 1725 /* Implements the Link Integrity Check for HDCP2.2 */ 1726 static int intel_hdcp2_check_link(struct intel_connector *connector) 1727 { 1728 struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 1729 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1730 struct intel_hdcp *hdcp = &connector->hdcp; 1731 enum port port = intel_dig_port->base.port; 1732 enum transcoder cpu_transcoder; 1733 int ret = 0; 1734 1735 mutex_lock(&hdcp->mutex); 1736 cpu_transcoder = hdcp->cpu_transcoder; 1737 1738 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ 1739 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || 1740 !hdcp->hdcp2_encrypted) { 1741 ret = -EINVAL; 1742 goto out; 1743 } 1744 1745 if (drm_WARN_ON(&dev_priv->drm, 1746 !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) { 1747 drm_err(&dev_priv->drm, 1748 "HDCP2.2 link stopped the encryption, %x\n", 1749 intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port))); 1750 ret = -ENXIO; 1751 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 1752 schedule_work(&hdcp->prop_work); 1753 goto out; 1754 } 1755 1756 ret = hdcp->shim->check_2_2_link(intel_dig_port); 1757 if (ret == HDCP_LINK_PROTECTED) { 1758 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { 1759 
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 1760 schedule_work(&hdcp->prop_work); 1761 } 1762 goto out; 1763 } 1764 1765 if (ret == HDCP_TOPOLOGY_CHANGE) { 1766 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) 1767 goto out; 1768 1769 drm_dbg_kms(&dev_priv->drm, 1770 "HDCP2.2 Downstream topology change\n"); 1771 ret = hdcp2_authenticate_repeater_topology(connector); 1772 if (!ret) { 1773 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; 1774 schedule_work(&hdcp->prop_work); 1775 goto out; 1776 } 1777 drm_dbg_kms(&dev_priv->drm, 1778 "[%s:%d] Repeater topology auth failed.(%d)\n", 1779 connector->base.name, connector->base.base.id, 1780 ret); 1781 } else { 1782 drm_dbg_kms(&dev_priv->drm, 1783 "[%s:%d] HDCP2.2 link failed, retrying auth\n", 1784 connector->base.name, connector->base.base.id); 1785 } 1786 1787 ret = _intel_hdcp2_disable(connector); 1788 if (ret) { 1789 drm_err(&dev_priv->drm, 1790 "[%s:%d] Failed to disable hdcp2.2 (%d)\n", 1791 connector->base.name, connector->base.base.id, ret); 1792 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 1793 schedule_work(&hdcp->prop_work); 1794 goto out; 1795 } 1796 1797 ret = _intel_hdcp2_enable(connector); 1798 if (ret) { 1799 drm_dbg_kms(&dev_priv->drm, 1800 "[%s:%d] Failed to enable hdcp2.2 (%d)\n", 1801 connector->base.name, connector->base.base.id, 1802 ret); 1803 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; 1804 schedule_work(&hdcp->prop_work); 1805 goto out; 1806 } 1807 1808 out: 1809 mutex_unlock(&hdcp->mutex); 1810 return ret; 1811 } 1812 1813 static void intel_hdcp_check_work(struct work_struct *work) 1814 { 1815 struct intel_hdcp *hdcp = container_of(to_delayed_work(work), 1816 struct intel_hdcp, 1817 check_work); 1818 struct intel_connector *connector = intel_hdcp_to_connector(hdcp); 1819 1820 if (!intel_hdcp2_check_link(connector)) 1821 schedule_delayed_work(&hdcp->check_work, 1822 DRM_HDCP2_CHECK_PERIOD_MS); 1823 else if (!intel_hdcp_check_link(connector)) 1824 
schedule_delayed_work(&hdcp->check_work, 1825 DRM_HDCP_CHECK_PERIOD_MS); 1826 } 1827 1828 static int i915_hdcp_component_bind(struct device *i915_kdev, 1829 struct device *mei_kdev, void *data) 1830 { 1831 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); 1832 1833 drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); 1834 mutex_lock(&dev_priv->hdcp_comp_mutex); 1835 dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; 1836 dev_priv->hdcp_master->mei_dev = mei_kdev; 1837 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1838 1839 return 0; 1840 } 1841 1842 static void i915_hdcp_component_unbind(struct device *i915_kdev, 1843 struct device *mei_kdev, void *data) 1844 { 1845 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); 1846 1847 drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n"); 1848 mutex_lock(&dev_priv->hdcp_comp_mutex); 1849 dev_priv->hdcp_master = NULL; 1850 mutex_unlock(&dev_priv->hdcp_comp_mutex); 1851 } 1852 1853 static const struct component_ops i915_hdcp_component_ops = { 1854 .bind = i915_hdcp_component_bind, 1855 .unbind = i915_hdcp_component_unbind, 1856 }; 1857 1858 static inline 1859 enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) 1860 { 1861 switch (port) { 1862 case PORT_A: 1863 return MEI_DDI_A; 1864 case PORT_B ... PORT_F: 1865 return (enum mei_fw_ddi)port; 1866 default: 1867 return MEI_DDI_INVALID_PORT; 1868 } 1869 } 1870 1871 static inline 1872 enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) 1873 { 1874 switch (cpu_transcoder) { 1875 case TRANSCODER_A ... 
TRANSCODER_D: 1876 return (enum mei_fw_tc)(cpu_transcoder | 0x10); 1877 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 1878 return MEI_INVALID_TRANSCODER; 1879 } 1880 } 1881 1882 static inline int initialize_hdcp_port_data(struct intel_connector *connector, 1883 const struct intel_hdcp_shim *shim) 1884 { 1885 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1886 struct intel_hdcp *hdcp = &connector->hdcp; 1887 struct hdcp_port_data *data = &hdcp->port_data; 1888 1889 if (INTEL_GEN(dev_priv) < 12) 1890 data->fw_ddi = 1891 intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port); 1892 else 1893 /* 1894 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled 1895 * with zero(INVALID PORT index). 1896 */ 1897 data->fw_ddi = MEI_DDI_INVALID_PORT; 1898 1899 /* 1900 * As associated transcoder is set and modified at modeset, here fw_tc 1901 * is initialized to zero (invalid transcoder index). This will be 1902 * retained for <Gen12 forever. 1903 */ 1904 data->fw_tc = MEI_INVALID_TRANSCODER; 1905 1906 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 1907 data->protocol = (u8)shim->protocol; 1908 1909 data->k = 1; 1910 if (!data->streams) 1911 data->streams = kcalloc(data->k, 1912 sizeof(struct hdcp2_streamid_type), 1913 GFP_KERNEL); 1914 if (!data->streams) { 1915 drm_err(&dev_priv->drm, "Out of Memory\n"); 1916 return -ENOMEM; 1917 } 1918 1919 data->streams[0].stream_id = 0; 1920 data->streams[0].stream_type = hdcp->content_type; 1921 1922 return 0; 1923 } 1924 1925 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) 1926 { 1927 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 1928 return false; 1929 1930 return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || 1931 IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)); 1932 } 1933 1934 void intel_hdcp_component_init(struct drm_i915_private *dev_priv) 1935 { 1936 int ret; 1937 1938 if (!is_hdcp2_supported(dev_priv)) 1939 return; 1940 1941 
	/*
	 * NOTE(review): tail of a function whose opening lines are above this
	 * hunk (it registers the i915 HDCP component); code left untouched.
	 */
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);

	dev_priv->hdcp_comp_added = true;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
	/*
	 * Register with the component framework so the external HDCP
	 * provider (presumably the MEI firmware interface, given
	 * dev_priv->hdcp_master usage elsewhere — confirm) can bind against
	 * I915_COMPONENT_HDCP.
	 */
	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
				  I915_COMPONENT_HDCP);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
			    ret);
		/* Roll back the flag so init/fini stay balanced on failure. */
		mutex_lock(&dev_priv->hdcp_comp_mutex);
		dev_priv->hdcp_comp_added = false;
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}
}

/*
 * Prepare HDCP2.2 support for @connector: initialize the port data used by
 * the MEI interface and, on success, mark the connector as HDCP2.2 capable
 * from the i915 side. Failure is non-fatal — the connector simply stays
 * HDCP1.4-only (hdcp2_supported remains false).
 */
static void intel_hdcp2_init(struct intel_connector *connector,
			     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	ret = initialize_hdcp_port_data(connector, shim);
	if (ret) {
		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
		return;
	}

	hdcp->hdcp2_supported = true;
}

/*
 * Set up per-connector HDCP state: optionally probe HDCP2.2, attach the
 * "Content Protection" connector property, and initialize the lock, work
 * items and CP_IRQ waitqueue used by the enable/disable/check paths.
 *
 * Returns 0 on success, -EINVAL for a NULL @shim, or the error from
 * attaching the content-protection property. hdcp->shim is only assigned
 * on success, so intel_hdcp_cleanup() (which checks shim) is a no-op for a
 * connector whose init failed.
 */
int intel_hdcp_init(struct intel_connector *connector,
		    const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	if (!shim)
		return -EINVAL;

	if (is_hdcp2_supported(dev_priv))
		intel_hdcp2_init(connector, shim);

	ret =
	drm_connector_attach_content_protection_property(&connector->base,
							 hdcp->hdcp2_supported);
	if (ret) {
		/* Undo intel_hdcp2_init(): drop the flag and its allocation. */
		hdcp->hdcp2_supported = false;
		kfree(hdcp->port_data.streams);
		return ret;
	}

	hdcp->shim = shim;
	mutex_init(&hdcp->mutex);
	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
	init_waitqueue_head(&hdcp->cp_irq_queue);

	return 0;
}

/*
 * Enable content protection on @connector, preferring HDCP2.2 and falling
 * back to HDCP1.4 when allowed by @content_type. On success the periodic
 * link-check work is scheduled (2.2 and 1.4 use different check periods)
 * and the connector property is flipped to ENABLED asynchronously via
 * prop_work. Returns 0 on success or a negative error code.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	/* Double-enable indicates a state-machine bug in the caller. */
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/*
	 * Gen12+ has per-transcoder (rather than per-port) HDCP registers,
	 * so the transcoder must be recorded and translated for the MEI
	 * firmware interface.
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		hdcp->cpu_transcoder = cpu_transcoder;
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
	}

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted (Type1 mandates HDCP2.2, so no 1.4 fallback there).
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		/* Property updates need connection_mutex; defer to a worker. */
		schedule_work(&hdcp->prop_work);
	}

	mutex_unlock(&hdcp->mutex);
	return ret;
}

/*
 * Disable whichever HDCP version (2.2 or 1.4) is currently encrypting the
 * link and mark content protection UNDESIRED. Returns 0 if nothing was
 * enabled, otherwise the result of the version-specific disable.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		if (hdcp->hdcp2_encrypted)
			ret = _intel_hdcp2_disable(connector);
		else if (hdcp->hdcp_encrypted)
			ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	/*
	 * Flushed after dropping hdcp->mutex — presumably because check_work
	 * takes the same mutex and cancelling under it would deadlock (TODO:
	 * confirm against intel_hdcp_check_work, not visible in this hunk).
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}

/*
 * Commit-time hook: reconcile the HDCP session with the new connector
 * state. A requested content-type change requires a full disable/re-enable
 * cycle with the new type; plain DESIRED/UNDESIRED transitions map directly
 * to enable/disable.
 */
void intel_hdcp_update_pipe(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
		to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
	    content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}

/*
 * Tear down the HDCP component registration (counterpart to the component
 * init above). Safe to call when the component was never added.
 */
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	if (!dev_priv->hdcp_comp_added) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}

	dev_priv->hdcp_comp_added = false;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
}

/*
 * Free per-connector HDCP allocations. A no-op for connectors that never
 * completed intel_hdcp_init() (shim is only set on init success).
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	if (!connector->hdcp.shim)
		return;

	mutex_lock(&connector->hdcp.mutex);
	kfree(connector->hdcp.port_data.streams);
	mutex_unlock(&connector->hdcp.mutex);
}

/*
 * Atomic-check hook for the content-protection property. Keeps ENABLED
 * from persisting across a disable, and forces a modeset (mode_changed)
 * whenever the requested protection state or content type actually needs
 * the HDCP session to be (re)negotiated in the commit phase.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
		    new_state->hdcp_content_type)
			return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/* Force a modeset so update_pipe runs and renegotiates HDCP. */
	crtc_state->mode_changed = true;
}

/* Handles the CP_IRQ raised from the DP HDCP sink */
void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/* Wake any thread sleeping on cp_irq_queue for a sink response. */
	atomic_inc(&connector->hdcp.cp_irq_count);
	wake_up_all(&connector->hdcp.cp_irq_queue);

	/* Re-check link state immediately rather than at the next period. */
	schedule_delayed_work(&hdcp->check_work, 0);
}