1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  *
5  * Authors:
6  * Sean Paul <seanpaul@chromium.org>
7  */
8 
9 #include <linux/component.h>
10 #include <linux/i2c.h>
11 #include <linux/random.h>
12 
13 #include <drm/drm_hdcp.h>
14 #include <drm/i915_component.h>
15 
16 #include "i915_reg.h"
17 #include "intel_display_power.h"
18 #include "intel_display_types.h"
19 #include "intel_hdcp.h"
20 #include "intel_sideband.h"
21 
22 #define KEY_LOAD_TRIES	5
23 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
24 #define HDCP2_LC_RETRY_CNT			3
25 
26 static
27 bool intel_hdcp_is_ksv_valid(u8 *ksv)
28 {
29 	int i, ones = 0;
30 	/* KSV has 20 1's and 20 0's */
31 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
32 		ones += hweight8(ksv[i]);
33 	if (ones != 20)
34 		return false;
35 
36 	return true;
37 }
38 
39 static
40 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
41 			       const struct intel_hdcp_shim *shim, u8 *bksv)
42 {
43 	int ret, i, tries = 2;
44 
45 	/* HDCP spec states that we must retry the bksv if it is invalid */
46 	for (i = 0; i < tries; i++) {
47 		ret = shim->read_bksv(intel_dig_port, bksv);
48 		if (ret)
49 			return ret;
50 		if (intel_hdcp_is_ksv_valid(bksv))
51 			break;
52 	}
53 	if (i == tries) {
54 		DRM_DEBUG_KMS("Bksv is invalid\n");
55 		return -ENODEV;
56 	}
57 
58 	return 0;
59 }
60 
61 /* Is HDCP1.4 capable on Platform and Sink */
62 bool intel_hdcp_capable(struct intel_connector *connector)
63 {
64 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
65 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
66 	bool capable = false;
67 	u8 bksv[5];
68 
69 	if (!shim)
70 		return capable;
71 
72 	if (shim->hdcp_capable) {
73 		shim->hdcp_capable(intel_dig_port, &capable);
74 	} else {
75 		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
76 			capable = true;
77 	}
78 
79 	return capable;
80 }
81 
82 /* Is HDCP2.2 capable on Platform and Sink */
83 bool intel_hdcp2_capable(struct intel_connector *connector)
84 {
85 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
86 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
87 	struct intel_hdcp *hdcp = &connector->hdcp;
88 	bool capable = false;
89 
90 	/* I915 support for HDCP2.2 */
91 	if (!hdcp->hdcp2_supported)
92 		return false;
93 
94 	/* MEI interface is solid */
95 	mutex_lock(&dev_priv->hdcp_comp_mutex);
96 	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
97 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
98 		return false;
99 	}
100 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
101 
102 	/* Sink's capability for HDCP2.2 */
103 	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
104 
105 	return capable;
106 }
107 
108 static inline bool intel_hdcp_in_use(struct intel_connector *connector)
109 {
110 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
111 	enum port port = connector->encoder->port;
112 	u32 reg;
113 
114 	reg = I915_READ(PORT_HDCP_STATUS(port));
115 	return reg & HDCP_STATUS_ENC;
116 }
117 
118 static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
119 {
120 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
121 	enum port port = connector->encoder->port;
122 	u32 reg;
123 
124 	reg = I915_READ(HDCP2_STATUS_DDI(port));
125 	return reg & LINK_ENCRYPTION_STATUS;
126 }
127 
/*
 * Wait for the repeater to report its KSV list ready.
 *
 * Returns 0 when the list is ready, the shim's error if the readiness read
 * itself failed, -ETIMEDOUT if the sink never became ready, or the error
 * from the wait helper.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/*
	 * Poll for ksv list ready (spec says max time allowed is 5s).
	 * The poll stops early when the readiness read fails (read_ret != 0)
	 * or the sink reports ready; the remaining __wait_for() arguments are
	 * the 5s budget and the polling interval bounds in microseconds.
	 */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
148 
149 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
150 {
151 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
152 	struct i915_power_well *power_well;
153 	enum i915_power_well_id id;
154 	bool enabled = false;
155 
156 	/*
157 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
158 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
159 	 */
160 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
161 		id = HSW_DISP_PW_GLOBAL;
162 	else
163 		id = SKL_DISP_PW_1;
164 
165 	mutex_lock(&power_domains->lock);
166 
167 	/* PG1 (power well #1) needs to be enabled */
168 	for_each_power_well(dev_priv, power_well) {
169 		if (power_well->desc->id == id) {
170 			enabled = power_well->desc->ops->is_enabled(dev_priv,
171 								    power_well);
172 			break;
173 		}
174 	}
175 	mutex_unlock(&power_domains->lock);
176 
177 	/*
178 	 * Another req for hdcp key loadability is enabled state of pll for
179 	 * cdclk. Without active crtc we wont land here. So we are assuming that
180 	 * cdclk is already on.
181 	 */
182 
183 	return enabled;
184 }
185 
186 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
187 {
188 	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
189 	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
190 		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
191 }
192 
/*
 * Load the HDCP1.4 keys into display HW if they are not already present.
 *
 * Returns 0 on success, -ENXIO when the keys are absent/invalid on
 * platforms where HW owns the load (or the loaded keys fail validation),
 * or the pcode/wait error otherwise.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Fast path: keys already loaded and validated */
	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
			          ret);
			return ret;
		}
	} else {
		/* Other BXT+ platforms trigger the load via the KEY_CONF reg */
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
243 
244 /* Returns updated SHA-1 index */
245 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
246 {
247 	I915_WRITE(HDCP_SHA_TEXT, sha_text);
248 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
249 		DRM_ERROR("Timed out waiting for SHA1 ready\n");
250 		return -ETIMEDOUT;
251 	}
252 	return 0;
253 }
254 
/*
 * Map a DDI port to its HDCP_REP_CTL repeater-present and SHA1-M0 select
 * bits.
 *
 * NOTE(review): the return type is u32 but the unknown-port path hands
 * back -EINVAL, which wraps to a large unsigned value; callers today only
 * invoke this for ports A-E, but the error path is not really usable as an
 * error code — worth tightening.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
{
	enum port port = intel_dig_port->base.port;
	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		break;
	}
	DRM_ERROR("Unknown port %d\n", port);
	return -EINVAL;
}
276 
277 static
278 int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
279 				const struct intel_hdcp_shim *shim,
280 				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
281 {
282 	struct drm_i915_private *dev_priv;
283 	u32 vprime, sha_text, sha_leftovers, rep_ctl;
284 	int ret, i, j, sha_idx;
285 
286 	dev_priv = intel_dig_port->base.base.dev->dev_private;
287 
288 	/* Process V' values from the receiver */
289 	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
290 		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
291 		if (ret)
292 			return ret;
293 		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
294 	}
295 
296 	/*
297 	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
298 	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
299 	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
300 	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
301 	 * index will keep track of our progress through the 64 bytes as well as
302 	 * helping us work the 40-bit KSVs through our 32-bit register.
303 	 *
304 	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
305 	 */
306 	sha_idx = 0;
307 	sha_text = 0;
308 	sha_leftovers = 0;
309 	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
310 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
311 	for (i = 0; i < num_downstream; i++) {
312 		unsigned int sha_empty;
313 		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
314 
315 		/* Fill up the empty slots in sha_text and write it out */
316 		sha_empty = sizeof(sha_text) - sha_leftovers;
317 		for (j = 0; j < sha_empty; j++)
318 			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
319 
320 		ret = intel_write_sha_text(dev_priv, sha_text);
321 		if (ret < 0)
322 			return ret;
323 
324 		/* Programming guide writes this every 64 bytes */
325 		sha_idx += sizeof(sha_text);
326 		if (!(sha_idx % 64))
327 			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
328 
329 		/* Store the leftover bytes from the ksv in sha_text */
330 		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
331 		sha_text = 0;
332 		for (j = 0; j < sha_leftovers; j++)
333 			sha_text |= ksv[sha_empty + j] <<
334 					((sizeof(sha_text) - j - 1) * 8);
335 
336 		/*
337 		 * If we still have room in sha_text for more data, continue.
338 		 * Otherwise, write it out immediately.
339 		 */
340 		if (sizeof(sha_text) > sha_leftovers)
341 			continue;
342 
343 		ret = intel_write_sha_text(dev_priv, sha_text);
344 		if (ret < 0)
345 			return ret;
346 		sha_leftovers = 0;
347 		sha_text = 0;
348 		sha_idx += sizeof(sha_text);
349 	}
350 
351 	/*
352 	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
353 	 * bytes are leftover from the last ksv, we might be able to fit them
354 	 * all in sha_text (first 2 cases), or we might need to split them up
355 	 * into 2 writes (last 2 cases).
356 	 */
357 	if (sha_leftovers == 0) {
358 		/* Write 16 bits of text, 16 bits of M0 */
359 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
360 		ret = intel_write_sha_text(dev_priv,
361 					   bstatus[0] << 8 | bstatus[1]);
362 		if (ret < 0)
363 			return ret;
364 		sha_idx += sizeof(sha_text);
365 
366 		/* Write 32 bits of M0 */
367 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
368 		ret = intel_write_sha_text(dev_priv, 0);
369 		if (ret < 0)
370 			return ret;
371 		sha_idx += sizeof(sha_text);
372 
373 		/* Write 16 bits of M0 */
374 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
375 		ret = intel_write_sha_text(dev_priv, 0);
376 		if (ret < 0)
377 			return ret;
378 		sha_idx += sizeof(sha_text);
379 
380 	} else if (sha_leftovers == 1) {
381 		/* Write 24 bits of text, 8 bits of M0 */
382 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
383 		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
384 		/* Only 24-bits of data, must be in the LSB */
385 		sha_text = (sha_text & 0xffffff00) >> 8;
386 		ret = intel_write_sha_text(dev_priv, sha_text);
387 		if (ret < 0)
388 			return ret;
389 		sha_idx += sizeof(sha_text);
390 
391 		/* Write 32 bits of M0 */
392 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
393 		ret = intel_write_sha_text(dev_priv, 0);
394 		if (ret < 0)
395 			return ret;
396 		sha_idx += sizeof(sha_text);
397 
398 		/* Write 24 bits of M0 */
399 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
400 		ret = intel_write_sha_text(dev_priv, 0);
401 		if (ret < 0)
402 			return ret;
403 		sha_idx += sizeof(sha_text);
404 
405 	} else if (sha_leftovers == 2) {
406 		/* Write 32 bits of text */
407 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
408 		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
409 		ret = intel_write_sha_text(dev_priv, sha_text);
410 		if (ret < 0)
411 			return ret;
412 		sha_idx += sizeof(sha_text);
413 
414 		/* Write 64 bits of M0 */
415 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
416 		for (i = 0; i < 2; i++) {
417 			ret = intel_write_sha_text(dev_priv, 0);
418 			if (ret < 0)
419 				return ret;
420 			sha_idx += sizeof(sha_text);
421 		}
422 	} else if (sha_leftovers == 3) {
423 		/* Write 32 bits of text */
424 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
425 		sha_text |= bstatus[0] << 24;
426 		ret = intel_write_sha_text(dev_priv, sha_text);
427 		if (ret < 0)
428 			return ret;
429 		sha_idx += sizeof(sha_text);
430 
431 		/* Write 8 bits of text, 24 bits of M0 */
432 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
433 		ret = intel_write_sha_text(dev_priv, bstatus[1]);
434 		if (ret < 0)
435 			return ret;
436 		sha_idx += sizeof(sha_text);
437 
438 		/* Write 32 bits of M0 */
439 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
440 		ret = intel_write_sha_text(dev_priv, 0);
441 		if (ret < 0)
442 			return ret;
443 		sha_idx += sizeof(sha_text);
444 
445 		/* Write 8 bits of M0 */
446 		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
447 		ret = intel_write_sha_text(dev_priv, 0);
448 		if (ret < 0)
449 			return ret;
450 		sha_idx += sizeof(sha_text);
451 	} else {
452 		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
453 			      sha_leftovers);
454 		return -EINVAL;
455 	}
456 
457 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
458 	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
459 	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
460 		ret = intel_write_sha_text(dev_priv, 0);
461 		if (ret < 0)
462 			return ret;
463 		sha_idx += sizeof(sha_text);
464 	}
465 
466 	/*
467 	 * Last write gets the length of the concatenation in bits. That is:
468 	 *  - 5 bytes per device
469 	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
470 	 */
471 	sha_text = (num_downstream * 5 + 10) * 8;
472 	ret = intel_write_sha_text(dev_priv, sha_text);
473 	if (ret < 0)
474 		return ret;
475 
476 	/* Tell the HW we're done with the hash and wait for it to ACK */
477 	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
478 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
479 				  HDCP_SHA1_COMPLETE, 1)) {
480 		DRM_ERROR("Timed out waiting for SHA1 complete\n");
481 		return -ETIMEDOUT;
482 	}
483 	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
484 		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
485 		return -ENXIO;
486 	}
487 
488 	return 0;
489 }
490 
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater (downstream
 * topology) authentication. Waits for the KSV FIFO, validates the topology
 * against spec limits and the revocation list, then verifies V'.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	struct drm_device *dev = connector->base.dev;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	/* Wait (up to 5s per spec) for the repeater to assemble its KSV list */
	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
	if (ret) {
		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(intel_dig_port, bstatus);
	if (ret)
		return ret;

	/* Spec caps the number of devices and cascade depth in the topology */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	/* One DRM_HDCP_KSV_LEN-byte KSV per downstream device */
	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Reject the topology if any downstream KSV is on the SRM blacklist */
	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the last validation error */
		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
		      num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
570 
/*
 * Implements Part 1 of the HDCP authorization procedure: An/Aksv exchange,
 * Bksv validation, R0/R0' comparison and enabling of link encryption.
 * Chains into Part 2 (intel_hdcp_auth_downstream()) when the sink is a
 * repeater. Returns 0 on success, negative error otherwise.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/*
	 * The unions below let the same bytes be written to HW as two u32
	 * register values and handed to the bus shim as a byte array.
	 */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
				  HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read the HW-generated An back and send An/Aksv to the sink */
	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* The sink's R0' timing budget starts at the Aksv write */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Reject a Bksv that is on the SRM revocation list */
	if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
		DRM_ERROR("BKSV is revoked\n");
		return -EPERM;
	}

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	/* Enable HDCP signalling on the link before starting auth/enc */
	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
			      I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	/* Repeaters additionally require downstream (Part 2) authentication */
	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
725 
/*
 * Tear down HDCP1.4 encryption on the connector's port: clear the port
 * config, wait for all status bits to drop, then disable HDCP signalling.
 * Caller holds hdcp->mutex. Returns 0 on success, negative error otherwise.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
		      connector->base.name, connector->base.base.id);

	/* Mark state first so the check-link worker stops treating us as on */
	hdcp->hdcp_encrypted = false;
	I915_WRITE(PORT_HDCP_CONF(port), 0);
	/* ~0 mask: every status bit must clear before we call it disabled */
	if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		DRM_ERROR("Failed to disable HDCP signalling\n");
		return ret;
	}

	DRM_DEBUG_KMS("HDCP is disabled\n");
	return 0;
}
754 
/*
 * Bring up HDCP1.4 on the connector: load the keys (with retries) and run
 * the authentication protocol (with retries, disabling between attempts).
 * Caller holds hdcp->mutex. Returns 0 on success, negative error otherwise.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	int i, ret, tries = 3;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
		      connector->base.name, connector->base.base.id);

	/* Key load requires PW#1 (or equivalent) to be up */
	if (!hdcp_key_loadable(dev_priv)) {
		DRM_ERROR("HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Retry the key load, clearing any partially-loaded keys in between */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
797 
/* hdcp is embedded in intel_connector; recover the enclosing connector */
static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
803 
804 /* Implements Part 3 of the HDCP authorization procedure */
805 static int intel_hdcp_check_link(struct intel_connector *connector)
806 {
807 	struct intel_hdcp *hdcp = &connector->hdcp;
808 	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
809 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
810 	enum port port = intel_dig_port->base.port;
811 	int ret = 0;
812 
813 	mutex_lock(&hdcp->mutex);
814 
815 	/* Check_link valid only when HDCP1.4 is enabled */
816 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
817 	    !hdcp->hdcp_encrypted) {
818 		ret = -EINVAL;
819 		goto out;
820 	}
821 
822 	if (WARN_ON(!intel_hdcp_in_use(connector))) {
823 		DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
824 			  connector->base.name, connector->base.base.id,
825 			  I915_READ(PORT_HDCP_STATUS(port)));
826 		ret = -ENXIO;
827 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
828 		schedule_work(&hdcp->prop_work);
829 		goto out;
830 	}
831 
832 	if (hdcp->shim->check_link(intel_dig_port)) {
833 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
834 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
835 			schedule_work(&hdcp->prop_work);
836 		}
837 		goto out;
838 	}
839 
840 	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
841 		      connector->base.name, connector->base.base.id);
842 
843 	ret = _intel_hdcp_disable(connector);
844 	if (ret) {
845 		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
846 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
847 		schedule_work(&hdcp->prop_work);
848 		goto out;
849 	}
850 
851 	ret = _intel_hdcp_enable(connector);
852 	if (ret) {
853 		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
854 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
855 		schedule_work(&hdcp->prop_work);
856 		goto out;
857 	}
858 
859 out:
860 	mutex_unlock(&hdcp->mutex);
861 	return ret;
862 }
863 
/*
 * Deferred worker that pushes hdcp->value out to the connector's
 * content-protection property. Runs from the workqueue because the
 * property update needs connection_mutex, which the schedulers of this
 * work may not be able to take. Lock order: connection_mutex, then
 * hdcp->mutex.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_device *dev = connector->base.dev;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
886 
887 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
888 {
889 	/* PORT E doesn't have HDCP, and PORT F is disabled */
890 	return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
891 }
892 
893 static int
894 hdcp2_prepare_ake_init(struct intel_connector *connector,
895 		       struct hdcp2_ake_init *ake_data)
896 {
897 	struct hdcp_port_data *data = &connector->hdcp.port_data;
898 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
899 	struct i915_hdcp_comp_master *comp;
900 	int ret;
901 
902 	mutex_lock(&dev_priv->hdcp_comp_mutex);
903 	comp = dev_priv->hdcp_master;
904 
905 	if (!comp || !comp->ops) {
906 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
907 		return -EINVAL;
908 	}
909 
910 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
911 	if (ret)
912 		DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
913 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
914 
915 	return ret;
916 }
917 
918 static int
919 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
920 				struct hdcp2_ake_send_cert *rx_cert,
921 				bool *paired,
922 				struct hdcp2_ake_no_stored_km *ek_pub_km,
923 				size_t *msg_sz)
924 {
925 	struct hdcp_port_data *data = &connector->hdcp.port_data;
926 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
927 	struct i915_hdcp_comp_master *comp;
928 	int ret;
929 
930 	mutex_lock(&dev_priv->hdcp_comp_mutex);
931 	comp = dev_priv->hdcp_master;
932 
933 	if (!comp || !comp->ops) {
934 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
935 		return -EINVAL;
936 	}
937 
938 	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
939 							 rx_cert, paired,
940 							 ek_pub_km, msg_sz);
941 	if (ret < 0)
942 		DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
943 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
944 
945 	return ret;
946 }
947 
948 static int hdcp2_verify_hprime(struct intel_connector *connector,
949 			       struct hdcp2_ake_send_hprime *rx_hprime)
950 {
951 	struct hdcp_port_data *data = &connector->hdcp.port_data;
952 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
953 	struct i915_hdcp_comp_master *comp;
954 	int ret;
955 
956 	mutex_lock(&dev_priv->hdcp_comp_mutex);
957 	comp = dev_priv->hdcp_master;
958 
959 	if (!comp || !comp->ops) {
960 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
961 		return -EINVAL;
962 	}
963 
964 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
965 	if (ret < 0)
966 		DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
967 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
968 
969 	return ret;
970 }
971 
972 static int
973 hdcp2_store_pairing_info(struct intel_connector *connector,
974 			 struct hdcp2_ake_send_pairing_info *pairing_info)
975 {
976 	struct hdcp_port_data *data = &connector->hdcp.port_data;
977 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
978 	struct i915_hdcp_comp_master *comp;
979 	int ret;
980 
981 	mutex_lock(&dev_priv->hdcp_comp_mutex);
982 	comp = dev_priv->hdcp_master;
983 
984 	if (!comp || !comp->ops) {
985 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
986 		return -EINVAL;
987 	}
988 
989 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
990 	if (ret < 0)
991 		DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
992 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
993 
994 	return ret;
995 }
996 
997 static int
998 hdcp2_prepare_lc_init(struct intel_connector *connector,
999 		      struct hdcp2_lc_init *lc_init)
1000 {
1001 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1002 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1003 	struct i915_hdcp_comp_master *comp;
1004 	int ret;
1005 
1006 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1007 	comp = dev_priv->hdcp_master;
1008 
1009 	if (!comp || !comp->ops) {
1010 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1011 		return -EINVAL;
1012 	}
1013 
1014 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1015 	if (ret < 0)
1016 		DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
1017 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1018 
1019 	return ret;
1020 }
1021 
1022 static int
1023 hdcp2_verify_lprime(struct intel_connector *connector,
1024 		    struct hdcp2_lc_send_lprime *rx_lprime)
1025 {
1026 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1027 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1028 	struct i915_hdcp_comp_master *comp;
1029 	int ret;
1030 
1031 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1032 	comp = dev_priv->hdcp_master;
1033 
1034 	if (!comp || !comp->ops) {
1035 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1036 		return -EINVAL;
1037 	}
1038 
1039 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1040 	if (ret < 0)
1041 		DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
1042 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1043 
1044 	return ret;
1045 }
1046 
1047 static int hdcp2_prepare_skey(struct intel_connector *connector,
1048 			      struct hdcp2_ske_send_eks *ske_data)
1049 {
1050 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1051 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1052 	struct i915_hdcp_comp_master *comp;
1053 	int ret;
1054 
1055 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1056 	comp = dev_priv->hdcp_master;
1057 
1058 	if (!comp || !comp->ops) {
1059 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1060 		return -EINVAL;
1061 	}
1062 
1063 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1064 	if (ret < 0)
1065 		DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
1066 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1067 
1068 	return ret;
1069 }
1070 
1071 static int
1072 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1073 				      struct hdcp2_rep_send_receiverid_list
1074 								*rep_topology,
1075 				      struct hdcp2_rep_send_ack *rep_send_ack)
1076 {
1077 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1078 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1079 	struct i915_hdcp_comp_master *comp;
1080 	int ret;
1081 
1082 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1083 	comp = dev_priv->hdcp_master;
1084 
1085 	if (!comp || !comp->ops) {
1086 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1087 		return -EINVAL;
1088 	}
1089 
1090 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1091 							 rep_topology,
1092 							 rep_send_ack);
1093 	if (ret < 0)
1094 		DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
1095 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1096 
1097 	return ret;
1098 }
1099 
1100 static int
1101 hdcp2_verify_mprime(struct intel_connector *connector,
1102 		    struct hdcp2_rep_stream_ready *stream_ready)
1103 {
1104 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1105 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1106 	struct i915_hdcp_comp_master *comp;
1107 	int ret;
1108 
1109 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1110 	comp = dev_priv->hdcp_master;
1111 
1112 	if (!comp || !comp->ops) {
1113 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1114 		return -EINVAL;
1115 	}
1116 
1117 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1118 	if (ret < 0)
1119 		DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
1120 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1121 
1122 	return ret;
1123 }
1124 
1125 static int hdcp2_authenticate_port(struct intel_connector *connector)
1126 {
1127 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1128 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1129 	struct i915_hdcp_comp_master *comp;
1130 	int ret;
1131 
1132 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1133 	comp = dev_priv->hdcp_master;
1134 
1135 	if (!comp || !comp->ops) {
1136 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1137 		return -EINVAL;
1138 	}
1139 
1140 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1141 	if (ret < 0)
1142 		DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
1143 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1144 
1145 	return ret;
1146 }
1147 
1148 static int hdcp2_close_mei_session(struct intel_connector *connector)
1149 {
1150 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1151 	struct i915_hdcp_comp_master *comp;
1152 	int ret;
1153 
1154 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1155 	comp = dev_priv->hdcp_master;
1156 
1157 	if (!comp || !comp->ops) {
1158 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1159 		return -EINVAL;
1160 	}
1161 
1162 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1163 					     &connector->hdcp.port_data);
1164 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1165 
1166 	return ret;
1167 }
1168 
/*
 * Drop the HDCP2.2 authentication state the mei firmware holds for this
 * port. Link encryption (if enabled) is torn down separately by
 * hdcp2_disable_encryption().
 */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1173 
/*
 * Authentication flow starts from here.
 *
 * HDCP2.2 Authentication and Key Exchange (AKE) stage:
 * AKE_Init -> AKE_Send_Cert -> (No_)Stored_Km -> AKE_Send_H_prime, plus
 * AKE_Send_Pairing_Info when the receiver is not yet paired. The crypto
 * work is delegated to the mei firmware via the hdcp2_* helpers; this
 * function only ferries messages between firmware and sink and validates
 * the certificate metadata.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	/* AKE messages are exchanged strictly one at a time: share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	/* The sink must declare HDCP2.2 in its RxCaps. */
	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Bail out early if the receiver ID is on the revocation list. */
	if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
					1)) {
		DRM_ERROR("Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* The firmware told us which message (and size) to forward. */
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1262 
/*
 * HDCP2.2 Locality Check (LC): round-trip LC_Init / LC_Send_L_prime with
 * the sink and have the mei firmware verify L'. Any failing step causes
 * the whole sequence to be retried, up to HDCP2_LC_RETRY_CNT attempts;
 * on exhaustion the error of the last attempt is returned.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	/* ret still holds the last attempt's status when the loop expires. */
	return ret;
}
1298 
1299 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1300 {
1301 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1302 	struct intel_hdcp *hdcp = &connector->hdcp;
1303 	struct hdcp2_ske_send_eks send_eks;
1304 	int ret;
1305 
1306 	ret = hdcp2_prepare_skey(connector, &send_eks);
1307 	if (ret < 0)
1308 		return ret;
1309 
1310 	ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
1311 					sizeof(send_eks));
1312 	if (ret < 0)
1313 		return ret;
1314 
1315 	return 0;
1316 }
1317 
1318 static
1319 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1320 {
1321 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1322 	struct intel_hdcp *hdcp = &connector->hdcp;
1323 	union {
1324 		struct hdcp2_rep_stream_manage stream_manage;
1325 		struct hdcp2_rep_stream_ready stream_ready;
1326 	} msgs;
1327 	const struct intel_hdcp_shim *shim = hdcp->shim;
1328 	int ret;
1329 
1330 	/* Prepare RepeaterAuth_Stream_Manage msg */
1331 	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1332 	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1333 
1334 	/* K no of streams is fixed as 1. Stored as big-endian. */
1335 	msgs.stream_manage.k = cpu_to_be16(1);
1336 
1337 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1338 	msgs.stream_manage.streams[0].stream_id = 0;
1339 	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1340 
1341 	/* Send it to Repeater */
1342 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
1343 				  sizeof(msgs.stream_manage));
1344 	if (ret < 0)
1345 		return ret;
1346 
1347 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
1348 				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1349 	if (ret < 0)
1350 		return ret;
1351 
1352 	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1353 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1354 
1355 	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1356 	if (ret < 0)
1357 		return ret;
1358 
1359 	hdcp->seq_num_m++;
1360 
1361 	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1362 		DRM_DEBUG_KMS("seq_num_m roll over.\n");
1363 		return -1;
1364 	}
1365 
1366 	return 0;
1367 }
1368 
/*
 * Read the repeater's RepeaterAuth_Send_ReceiverID_List, sanity-check the
 * topology limits and seq_num_v, screen the downstream receiver IDs
 * against the revocation list, have the mei firmware verify V' and build
 * the ack, then send RepeaterAuth_Send_Ack back to the repeater.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct drm_device *dev = connector->base.dev;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* The repeater itself reports topology overflow in RxInfo. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		DRM_DEBUG_KMS("Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two RxInfo bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
					device_cnt)) {
		DRM_ERROR("Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit the new seq_num_v once V' checked out. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1429 
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	/*
	 * Repeater authentication: validate the downstream topology first,
	 * then run the stream management exchange.
	 */
	int ret = hdcp2_authenticate_repeater_topology(connector);

	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1440 
1441 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1442 {
1443 	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1444 	struct intel_hdcp *hdcp = &connector->hdcp;
1445 	const struct intel_hdcp_shim *shim = hdcp->shim;
1446 	int ret;
1447 
1448 	ret = hdcp2_authentication_key_exchange(connector);
1449 	if (ret < 0) {
1450 		DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
1451 		return ret;
1452 	}
1453 
1454 	ret = hdcp2_locality_check(connector);
1455 	if (ret < 0) {
1456 		DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
1457 		return ret;
1458 	}
1459 
1460 	ret = hdcp2_session_key_exchange(connector);
1461 	if (ret < 0) {
1462 		DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
1463 		return ret;
1464 	}
1465 
1466 	if (shim->config_stream_type) {
1467 		ret = shim->config_stream_type(intel_dig_port,
1468 					       hdcp->is_repeater,
1469 					       hdcp->content_type);
1470 		if (ret < 0)
1471 			return ret;
1472 	}
1473 
1474 	if (hdcp->is_repeater) {
1475 		ret = hdcp2_authenticate_repeater(connector);
1476 		if (ret < 0) {
1477 			DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
1478 			return ret;
1479 		}
1480 	}
1481 
1482 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1483 	ret = hdcp2_authenticate_port(connector);
1484 	if (ret < 0)
1485 		return ret;
1486 
1487 	return ret;
1488 }
1489 
/*
 * Turn on HDCP2.2 link encryption on a port whose authentication has
 * already completed, waiting up to ENCRYPT_STATUS_CHANGE_TIMEOUT_MS for
 * the hardware to report the encryption status.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = connector->encoder->port;
	int ret;

	/* Encryption must not already be live when we get here. */
	WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);

	/* Optional shim hook; protocols that need it turn on HDCP signalling. */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
		if (ret) {
			DRM_ERROR("Failed to enable HDCP signalling. %d\n",
				  ret);
			return ret;
		}
	}

	if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		I915_WRITE(HDCP2_CTL_DDI(port),
			   I915_READ(HDCP2_CTL_DDI(port)) |
			   CTL_LINK_ENCRYPTION_REQ);
	}

	/* Wait for the hardware to acknowledge the encryption request. */
	ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1522 
/*
 * Turn off HDCP2.2 link encryption: drop the encryption request bit, wait
 * for the hardware to confirm, then (optionally) disable HDCP signalling
 * via the shim.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = connector->encoder->port;
	int ret;

	/* Encryption is expected to be live when we get here. */
	WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));

	I915_WRITE(HDCP2_CTL_DDI(port),
		   I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		DRM_DEBUG_KMS("Disable Encryption Timedout");

	/* Optional shim hook, mirroring the enable path. */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
		if (ret) {
			DRM_ERROR("Failed to disable HDCP signalling. %d\n",
				  ret);
			return ret;
		}
	}

	return ret;
}
1553 
/*
 * Attempt full HDCP2.2 authentication up to `tries` times and, once it
 * succeeds, enable link encryption. The mei session is torn down after
 * every failed attempt so the next one starts from scratch.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
			      i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			DRM_DEBUG_KMS("Port deauth failed.\n");
	}

	/* i != tries means the loop broke out on a successful attempt. */
	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				DRM_DEBUG_KMS("Port deauth failed.\n");
		}
	}

	return ret;
}
1586 
1587 static int _intel_hdcp2_enable(struct intel_connector *connector)
1588 {
1589 	struct intel_hdcp *hdcp = &connector->hdcp;
1590 	int ret;
1591 
1592 	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1593 		      connector->base.name, connector->base.base.id,
1594 		      hdcp->content_type);
1595 
1596 	ret = hdcp2_authenticate_and_encrypt(connector);
1597 	if (ret) {
1598 		DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
1599 			      hdcp->content_type, ret);
1600 		return ret;
1601 	}
1602 
1603 	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
1604 		      connector->base.name, connector->base.base.id,
1605 		      hdcp->content_type);
1606 
1607 	hdcp->hdcp2_encrypted = true;
1608 	return 0;
1609 }
1610 
1611 static int _intel_hdcp2_disable(struct intel_connector *connector)
1612 {
1613 	int ret;
1614 
1615 	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
1616 		      connector->base.name, connector->base.base.id);
1617 
1618 	ret = hdcp2_disable_encryption(connector);
1619 
1620 	if (hdcp2_deauthenticate_port(connector) < 0)
1621 		DRM_DEBUG_KMS("Port deauth failed.\n");
1622 
1623 	connector->hdcp.hdcp2_encrypted = false;
1624 
1625 	return ret;
1626 }
1627 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Called with hdcp2 encryption expected to be active. Depending on what
 * the shim's check_2_2_link reports, this either confirms the link,
 * re-validates only the repeater topology, or runs a full
 * disable/re-enable cycle. Userspace is kept informed through the
 * content-protection property worker.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = connector->encoder->port;
	int ret = 0;

	mutex_lock(&hdcp->mutex);

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Hardware dropped encryption behind our back: downgrade the
	 * property to DESIRED so userspace learns protection was lost.
	 */
	if (WARN_ON(!intel_hdcp2_in_use(connector))) {
		DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
			  I915_READ(HDCP2_STATUS_DDI(port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(intel_dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		/* Link is fine; re-assert ENABLED unless userspace opted out. */
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		/* Only the downstream topology changed: re-auth the repeater. */
		DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
			goto out;
		}
		DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
			      connector->base.name, connector->base.base.id,
			      ret);
	} else {
		DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
			      connector->base.name, connector->base.base.id);
	}

	/* Full re-authentication: tear everything down and start over. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			  connector->base.name, connector->base.base.id, ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			      connector->base.name, connector->base.base.id,
			      ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1706 
1707 static void intel_hdcp_check_work(struct work_struct *work)
1708 {
1709 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1710 					       struct intel_hdcp,
1711 					       check_work);
1712 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1713 
1714 	if (!intel_hdcp2_check_link(connector))
1715 		schedule_delayed_work(&hdcp->check_work,
1716 				      DRM_HDCP2_CHECK_PERIOD_MS);
1717 	else if (!intel_hdcp_check_link(connector))
1718 		schedule_delayed_work(&hdcp->check_work,
1719 				      DRM_HDCP_CHECK_PERIOD_MS);
1720 }
1721 
1722 static int i915_hdcp_component_bind(struct device *i915_kdev,
1723 				    struct device *mei_kdev, void *data)
1724 {
1725 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1726 
1727 	DRM_DEBUG("I915 HDCP comp bind\n");
1728 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1729 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1730 	dev_priv->hdcp_master->mei_dev = mei_kdev;
1731 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1732 
1733 	return 0;
1734 }
1735 
1736 static void i915_hdcp_component_unbind(struct device *i915_kdev,
1737 				       struct device *mei_kdev, void *data)
1738 {
1739 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1740 
1741 	DRM_DEBUG("I915 HDCP comp unbind\n");
1742 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1743 	dev_priv->hdcp_master = NULL;
1744 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1745 }
1746 
/*
 * Component callbacks for the HDCP firmware interface; the bound side
 * supplies the i915_hdcp_comp_master via the bind data pointer.
 */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1751 
/*
 * Populate the hdcp_port_data handed to the mei firmware for every
 * HDCP2.2 operation on this connector. The single-entry streams array is
 * allocated once here and freed in intel_hdcp_cleanup() or on the
 * intel_hdcp_init() error path.
 */
static inline int initialize_hdcp_port_data(struct intel_connector *connector,
					    const struct intel_hdcp_shim *shim)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp_port_data *data = &hdcp->port_data;

	data->port = connector->encoder->port;
	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Only a single stream is supported (HDMI / DP SST). */
	data->k = 1;
	if (!data->streams)
		data->streams = kcalloc(data->k,
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		DRM_ERROR("Out of Memory\n");
		return -ENOMEM;
	}

	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}
1777 
1778 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1779 {
1780 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1781 		return false;
1782 
1783 	return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
1784 		IS_KABYLAKE(dev_priv));
1785 }
1786 
1787 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
1788 {
1789 	int ret;
1790 
1791 	if (!is_hdcp2_supported(dev_priv))
1792 		return;
1793 
1794 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1795 	WARN_ON(dev_priv->hdcp_comp_added);
1796 
1797 	dev_priv->hdcp_comp_added = true;
1798 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1799 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
1800 				  I915_COMPONENT_HDCP);
1801 	if (ret < 0) {
1802 		DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
1803 		mutex_lock(&dev_priv->hdcp_comp_mutex);
1804 		dev_priv->hdcp_comp_added = false;
1805 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1806 		return;
1807 	}
1808 }
1809 
1810 static void intel_hdcp2_init(struct intel_connector *connector,
1811 			     const struct intel_hdcp_shim *shim)
1812 {
1813 	struct intel_hdcp *hdcp = &connector->hdcp;
1814 	int ret;
1815 
1816 	ret = initialize_hdcp_port_data(connector, shim);
1817 	if (ret) {
1818 		DRM_DEBUG_KMS("Mei hdcp data init failed\n");
1819 		return;
1820 	}
1821 
1822 	hdcp->hdcp2_supported = true;
1823 }
1824 
1825 int intel_hdcp_init(struct intel_connector *connector,
1826 		    const struct intel_hdcp_shim *shim)
1827 {
1828 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1829 	struct intel_hdcp *hdcp = &connector->hdcp;
1830 	int ret;
1831 
1832 	if (!shim)
1833 		return -EINVAL;
1834 
1835 	if (is_hdcp2_supported(dev_priv))
1836 		intel_hdcp2_init(connector, shim);
1837 
1838 	ret =
1839 	drm_connector_attach_content_protection_property(&connector->base,
1840 							 hdcp->hdcp2_supported);
1841 	if (ret) {
1842 		hdcp->hdcp2_supported = false;
1843 		kfree(hdcp->port_data.streams);
1844 		return ret;
1845 	}
1846 
1847 	hdcp->shim = shim;
1848 	mutex_init(&hdcp->mutex);
1849 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
1850 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
1851 	init_waitqueue_head(&hdcp->cp_irq_queue);
1852 
1853 	return 0;
1854 }
1855 
/*
 * intel_hdcp_enable - enable content protection on @connector for the
 * requested @content_type.
 *
 * HDCP2.2 is tried first when both platform and sink are capable;
 * HDCP1.4 is the fallback, but only for content types other than Type1.
 * On success the periodic link check is scheduled and userspace is
 * notified through the property worker.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		schedule_work(&hdcp->prop_work);
	}

	mutex_unlock(&hdcp->mutex);
	return ret;
}
1897 
/*
 * intel_hdcp_disable - disable content protection on @connector.
 *
 * Marks the property UNDESIRED and tears down whichever of HDCP2.2 or
 * HDCP1.4 encryption is active. The periodic link check is cancelled
 * after dropping hdcp->mutex, since the check worker takes that mutex
 * itself.
 *
 * Returns 0 on success, negative errno otherwise.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		if (hdcp->hdcp2_encrypted)
			ret = _intel_hdcp2_disable(connector);
		else if (hdcp->hdcp_encrypted)
			ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
1920 
1921 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
1922 {
1923 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1924 	if (!dev_priv->hdcp_comp_added) {
1925 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1926 		return;
1927 	}
1928 
1929 	dev_priv->hdcp_comp_added = false;
1930 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1931 
1932 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
1933 }
1934 
1935 void intel_hdcp_cleanup(struct intel_connector *connector)
1936 {
1937 	if (!connector->hdcp.shim)
1938 		return;
1939 
1940 	mutex_lock(&connector->hdcp.mutex);
1941 	kfree(connector->hdcp.port_data.streams);
1942 	mutex_unlock(&connector->hdcp.mutex);
1943 }
1944 
/*
 * intel_hdcp_atomic_check - decide whether a content-protection change
 * requires a modeset.
 *
 * Normalizes ENABLED back to DESIRED when the connector is being
 * disabled, and forces a mode change whenever the desired protection
 * state or hdcp content type actually changed.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	/* Protection (or content type) changed: force a full modeset. */
	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	crtc_state->mode_changed = true;
}
1980 
1981 /* Handles the CP_IRQ raised from the DP HDCP sink */
1982 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
1983 {
1984 	struct intel_hdcp *hdcp = &connector->hdcp;
1985 
1986 	if (!hdcp->shim)
1987 		return;
1988 
1989 	atomic_inc(&connector->hdcp.cp_irq_count);
1990 	wake_up_all(&connector->hdcp.cp_irq_queue);
1991 
1992 	schedule_delayed_work(&hdcp->check_work, 0);
1993 }
1994