xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_reg.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_hdcp.h"
22 #include "intel_sideband.h"
23 #include "intel_connector.h"
24 
25 #define KEY_LOAD_TRIES	5
26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
27 #define HDCP2_LC_RETRY_CNT			3
28 
29 static
30 bool intel_hdcp_is_ksv_valid(u8 *ksv)
31 {
32 	int i, ones = 0;
33 	/* KSV has 20 1's and 20 0's */
34 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
35 		ones += hweight8(ksv[i]);
36 	if (ones != 20)
37 		return false;
38 
39 	return true;
40 }
41 
42 static
43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
44 			       const struct intel_hdcp_shim *shim, u8 *bksv)
45 {
46 	int ret, i, tries = 2;
47 
48 	/* HDCP spec states that we must retry the bksv if it is invalid */
49 	for (i = 0; i < tries; i++) {
50 		ret = shim->read_bksv(intel_dig_port, bksv);
51 		if (ret)
52 			return ret;
53 		if (intel_hdcp_is_ksv_valid(bksv))
54 			break;
55 	}
56 	if (i == tries) {
57 		DRM_DEBUG_KMS("Bksv is invalid\n");
58 		return -ENODEV;
59 	}
60 
61 	return 0;
62 }
63 
64 /* Is HDCP1.4 capable on Platform and Sink */
65 bool intel_hdcp_capable(struct intel_connector *connector)
66 {
67 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
68 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
69 	bool capable = false;
70 	u8 bksv[5];
71 
72 	if (!shim)
73 		return capable;
74 
75 	if (shim->hdcp_capable) {
76 		shim->hdcp_capable(intel_dig_port, &capable);
77 	} else {
78 		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
79 			capable = true;
80 	}
81 
82 	return capable;
83 }
84 
85 /* Is HDCP2.2 capable on Platform and Sink */
86 bool intel_hdcp2_capable(struct intel_connector *connector)
87 {
88 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
89 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
90 	struct intel_hdcp *hdcp = &connector->hdcp;
91 	bool capable = false;
92 
93 	/* I915 support for HDCP2.2 */
94 	if (!hdcp->hdcp2_supported)
95 		return false;
96 
97 	/* MEI interface is solid */
98 	mutex_lock(&dev_priv->hdcp_comp_mutex);
99 	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
100 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
101 		return false;
102 	}
103 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
104 
105 	/* Sink's capability for HDCP2.2 */
106 	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
107 
108 	return capable;
109 }
110 
111 static inline
112 bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
113 		       enum transcoder cpu_transcoder, enum port port)
114 {
115 	return intel_de_read(dev_priv,
116 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
117 	       HDCP_STATUS_ENC;
118 }
119 
120 static inline
121 bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
122 			enum transcoder cpu_transcoder, enum port port)
123 {
124 	return intel_de_read(dev_priv,
125 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
126 	       LINK_ENCRYPTION_STATUS;
127 }
128 
/*
 * Wait for the sink to signal that its KSV FIFO is ready to be read.
 *
 * The HDCP 1.4 spec gives a repeater up to 5 seconds to assemble the
 * downstream KSV list, so poll shim->read_ksv_ready() for at most that
 * long (1ms..100ms backoff via __wait_for()).
 *
 * Returns 0 on success, -ETIMEDOUT if the list never became ready, or a
 * negative error code propagated from the shim read.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
149 
/*
 * Check whether the HW is currently in a state where the HDCP1.4 key can
 * be loaded: the relevant power well (HSW_DISP_PW_GLOBAL on HSW/BDW,
 * SKL_DISP_PW_1 otherwise) must be enabled.
 */
static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	enum i915_power_well_id id;
	bool enabled = false;

	/*
	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		id = HSW_DISP_PW_GLOBAL;
	else
		id = SKL_DISP_PW_1;

	mutex_lock(&power_domains->lock);

	/* PG1 (power well #1) needs to be enabled */
	for_each_power_well(dev_priv, power_well) {
		if (power_well->desc->id == id) {
			enabled = power_well->desc->ops->is_enabled(dev_priv,
								    power_well);
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	/*
	 * Another req for hdcp key loadability is enabled state of pll for
	 * cdclk. Without active crtc we wont land here. So we are assuming that
	 * cdclk is already on.
	 */

	return enabled;
}
186 
187 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
188 {
189 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
190 	intel_de_write(dev_priv, HDCP_KEY_STATUS,
191 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
192 }
193 
/*
 * Load the HDCP1.4 key into the display HW and hand the Aksv over to the
 * PCH for use in authentication.
 *
 * Returns 0 if a valid key is (or becomes) loaded, -ENXIO when the HW
 * reports a bad key state, or a negative error from the pcode mailbox /
 * register wait.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if a valid key is already loaded */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
245 
246 /* Returns updated SHA-1 index */
247 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
248 {
249 	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
250 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
251 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
252 		return -ETIMEDOUT;
253 	}
254 	return 0;
255 }
256 
/*
 * Return the HDCP_REP_CTL repeater-present | SHA1-M0 selector bits for
 * the transcoder (gen12+) or DDI port (pre-gen12) being authenticated.
 *
 * NOTE(review): on an unknown transcoder/port this returns -EINVAL
 * implicitly converted to u32; callers use the value as an opaque
 * bitmask, so the error is effectively only logged here.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Gen12+ selects by transcoder rather than by DDI port */
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
298 
/*
 * V' validation step of the HDCP1.4 repeater flow: load the receiver's
 * V' into the HW, then stream the downstream KSV list, BINFO/BSTATUS and
 * M0 through the HW SHA-1 engine and check that the locally computed V
 * matches V'.
 *
 * Returns 0 on match, -ETIMEDOUT on SHA engine timeouts, -ENXIO on a
 * SHA-1 mismatch, -EINVAL on bookkeeping errors, or a negative error
 * propagated from the shim reads.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = intel_dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
			      sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
527 
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater
 * authentication. Waits for the KSV FIFO to become ready, checks the
 * topology limits from BSTATUS, reads and revocation-checks the
 * downstream KSV list, then validates V' (retrying up to 3 times per
 * the DP spec).
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
	if (ret) {
		DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(intel_dig_port, bstatus);
	if (ret)
		return ret;

	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream)) {
		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
		      num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
608 
/*
 * Implements Part 1 of the HDCP authorization procedure: An
 * generation/capture, Aksv/Bksv exchange, Ri/Ri' comparison and enabling
 * of link encryption. Chains into Part 2 (intel_hdcp_auth_downstream())
 * when the sink reports itself as a repeater.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = intel_dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions bridge the 32-bit HW registers and the byte-wise shim API */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_DEBUG_KMS("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' generation in the sink starts once it has received Aksv */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
		DRM_ERROR("BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
			      intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
771 
/*
 * Disable HDCP1.4 encryption on this connector: clear HDCP_CONF, wait
 * for the status register to fully clear, then drop HDCP signalling via
 * the shim. Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
803 
/*
 * Enable HDCP1.4 on the connector: load the HW key (up to KEY_LOAD_TRIES
 * attempts, clearing the key between failed attempts), then run the
 * authentication protocol up to 3 times, tearing encryption back down
 * between failed attempts so each retry starts clean.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
848 
static inline
struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	/* hdcp is embedded in intel_connector; recover the container */
	return container_of(hdcp, struct intel_connector, hdcp);
}
854 
/*
 * Implements Part 3 of the HDCP authorization procedure: link integrity
 * check. With hdcp->mutex held, verify encryption is still active in HW
 * and query the shim's link check; on failure, disable HDCP and attempt
 * a full re-authentication, demoting the content-protection property to
 * DESIRED (via the prop worker) when that fails too.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	if (hdcp->shim->check_link(intel_dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
919 
/*
 * Deferred worker that publishes hdcp->value to the connector's
 * content-protection property; run from a worker so the update can take
 * connection_mutex alongside hdcp->mutex.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}
942 
943 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
944 {
945 	return INTEL_INFO(dev_priv)->display.has_hdcp &&
946 			(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
947 }
948 
949 static int
950 hdcp2_prepare_ake_init(struct intel_connector *connector,
951 		       struct hdcp2_ake_init *ake_data)
952 {
953 	struct hdcp_port_data *data = &connector->hdcp.port_data;
954 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
955 	struct i915_hdcp_comp_master *comp;
956 	int ret;
957 
958 	mutex_lock(&dev_priv->hdcp_comp_mutex);
959 	comp = dev_priv->hdcp_master;
960 
961 	if (!comp || !comp->ops) {
962 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
963 		return -EINVAL;
964 	}
965 
966 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
967 	if (ret)
968 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
969 			    ret);
970 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
971 
972 	return ret;
973 }
974 
/*
 * Hand the receiver certificate to the MEI FW for verification; on
 * success the FW reports the pairing state and prepares the
 * no-stored-km Ek_pub(km) message (*msg_sz bytes) for the sink.
 * Returns -EINVAL if the MEI component isn't bound, else the FW result.
 */
static int
hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
				struct hdcp2_ake_send_cert *rx_cert,
				bool *paired,
				struct hdcp2_ake_no_stored_km *ek_pub_km,
				size_t *msg_sz)
{
	struct hdcp_port_data *data = &connector->hdcp.port_data;
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct i915_hdcp_comp_master *comp;
	int ret;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	comp = dev_priv->hdcp_master;

	if (!comp || !comp->ops) {
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return -EINVAL;
	}

	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
							 rx_cert, paired,
							 ek_pub_km, msg_sz);
	if (ret < 0)
		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
			    ret);
	mutex_unlock(&dev_priv->hdcp_comp_mutex);

	return ret;
}
1005 
1006 static int hdcp2_verify_hprime(struct intel_connector *connector,
1007 			       struct hdcp2_ake_send_hprime *rx_hprime)
1008 {
1009 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1010 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1011 	struct i915_hdcp_comp_master *comp;
1012 	int ret;
1013 
1014 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1015 	comp = dev_priv->hdcp_master;
1016 
1017 	if (!comp || !comp->ops) {
1018 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1019 		return -EINVAL;
1020 	}
1021 
1022 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1023 	if (ret < 0)
1024 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1025 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1026 
1027 	return ret;
1028 }
1029 
1030 static int
1031 hdcp2_store_pairing_info(struct intel_connector *connector,
1032 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1033 {
1034 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1035 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1036 	struct i915_hdcp_comp_master *comp;
1037 	int ret;
1038 
1039 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1040 	comp = dev_priv->hdcp_master;
1041 
1042 	if (!comp || !comp->ops) {
1043 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1044 		return -EINVAL;
1045 	}
1046 
1047 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1048 	if (ret < 0)
1049 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1050 			    ret);
1051 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1052 
1053 	return ret;
1054 }
1055 
1056 static int
1057 hdcp2_prepare_lc_init(struct intel_connector *connector,
1058 		      struct hdcp2_lc_init *lc_init)
1059 {
1060 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1061 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1062 	struct i915_hdcp_comp_master *comp;
1063 	int ret;
1064 
1065 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1066 	comp = dev_priv->hdcp_master;
1067 
1068 	if (!comp || !comp->ops) {
1069 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1070 		return -EINVAL;
1071 	}
1072 
1073 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1074 	if (ret < 0)
1075 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1076 			    ret);
1077 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1078 
1079 	return ret;
1080 }
1081 
1082 static int
1083 hdcp2_verify_lprime(struct intel_connector *connector,
1084 		    struct hdcp2_lc_send_lprime *rx_lprime)
1085 {
1086 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1087 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1088 	struct i915_hdcp_comp_master *comp;
1089 	int ret;
1090 
1091 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1092 	comp = dev_priv->hdcp_master;
1093 
1094 	if (!comp || !comp->ops) {
1095 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1096 		return -EINVAL;
1097 	}
1098 
1099 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1100 	if (ret < 0)
1101 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1102 			    ret);
1103 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1104 
1105 	return ret;
1106 }
1107 
1108 static int hdcp2_prepare_skey(struct intel_connector *connector,
1109 			      struct hdcp2_ske_send_eks *ske_data)
1110 {
1111 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1112 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1113 	struct i915_hdcp_comp_master *comp;
1114 	int ret;
1115 
1116 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1117 	comp = dev_priv->hdcp_master;
1118 
1119 	if (!comp || !comp->ops) {
1120 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1121 		return -EINVAL;
1122 	}
1123 
1124 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1125 	if (ret < 0)
1126 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1127 			    ret);
1128 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1129 
1130 	return ret;
1131 }
1132 
1133 static int
1134 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1135 				      struct hdcp2_rep_send_receiverid_list
1136 								*rep_topology,
1137 				      struct hdcp2_rep_send_ack *rep_send_ack)
1138 {
1139 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1140 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1141 	struct i915_hdcp_comp_master *comp;
1142 	int ret;
1143 
1144 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1145 	comp = dev_priv->hdcp_master;
1146 
1147 	if (!comp || !comp->ops) {
1148 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1149 		return -EINVAL;
1150 	}
1151 
1152 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1153 							 rep_topology,
1154 							 rep_send_ack);
1155 	if (ret < 0)
1156 		drm_dbg_kms(&dev_priv->drm,
1157 			    "Verify rep topology failed. %d\n", ret);
1158 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1159 
1160 	return ret;
1161 }
1162 
1163 static int
1164 hdcp2_verify_mprime(struct intel_connector *connector,
1165 		    struct hdcp2_rep_stream_ready *stream_ready)
1166 {
1167 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1168 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1169 	struct i915_hdcp_comp_master *comp;
1170 	int ret;
1171 
1172 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1173 	comp = dev_priv->hdcp_master;
1174 
1175 	if (!comp || !comp->ops) {
1176 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1177 		return -EINVAL;
1178 	}
1179 
1180 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1181 	if (ret < 0)
1182 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1183 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1184 
1185 	return ret;
1186 }
1187 
1188 static int hdcp2_authenticate_port(struct intel_connector *connector)
1189 {
1190 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1191 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1192 	struct i915_hdcp_comp_master *comp;
1193 	int ret;
1194 
1195 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1196 	comp = dev_priv->hdcp_master;
1197 
1198 	if (!comp || !comp->ops) {
1199 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1200 		return -EINVAL;
1201 	}
1202 
1203 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1204 	if (ret < 0)
1205 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1206 			    ret);
1207 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1208 
1209 	return ret;
1210 }
1211 
1212 static int hdcp2_close_mei_session(struct intel_connector *connector)
1213 {
1214 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1215 	struct i915_hdcp_comp_master *comp;
1216 	int ret;
1217 
1218 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1219 	comp = dev_priv->hdcp_master;
1220 
1221 	if (!comp || !comp->ops) {
1222 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1223 		return -EINVAL;
1224 	}
1225 
1226 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1227 					     &connector->hdcp.port_data);
1228 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1229 
1230 	return ret;
1231 }
1232 
/* Deauthentication currently amounts to closing the mei/firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1237 
/*
 * Authentication flow starts from here.
 *
 * HDCP2.2 AKE (Authentication and Key Exchange) stage: send AKE_Init,
 * receive and validate AKE_Send_Cert, forward the (no_)stored_km message
 * prepared by the firmware, verify H', and store pairing info when the
 * receiver was not previously paired.
 *
 * Returns 0 on success, or a negative error code from the first failing
 * step (-EINVAL on a non-2.2 cert, -EPERM on a revoked receiver ID).
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* All AKE messages share one buffer; only one is live at a time. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers found on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1)) {
		DRM_ERROR("Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* size is set by the firmware: stored vs no_stored km differ. */
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1327 
/*
 * HDCP2.2 Locality Check (LC): send LC_Init, read LC_Send_L_prime and
 * have the firmware verify L'.  The whole sequence is retried up to
 * HDCP2_LC_RETRY_CNT times; on exhaustion the last step's error code is
 * returned.  Returns 0 when L' verifies.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		/* Any step failing restarts the whole LC attempt. */
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	/* ret holds 0 on success, else the last failure's error code. */
	return ret;
}
1363 
1364 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1365 {
1366 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
1367 	struct intel_hdcp *hdcp = &connector->hdcp;
1368 	struct hdcp2_ske_send_eks send_eks;
1369 	int ret;
1370 
1371 	ret = hdcp2_prepare_skey(connector, &send_eks);
1372 	if (ret < 0)
1373 		return ret;
1374 
1375 	ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
1376 					sizeof(send_eks));
1377 	if (ret < 0)
1378 		return ret;
1379 
1380 	return 0;
1381 }
1382 
1383 static
1384 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1385 {
1386 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
1387 	struct intel_hdcp *hdcp = &connector->hdcp;
1388 	union {
1389 		struct hdcp2_rep_stream_manage stream_manage;
1390 		struct hdcp2_rep_stream_ready stream_ready;
1391 	} msgs;
1392 	const struct intel_hdcp_shim *shim = hdcp->shim;
1393 	int ret;
1394 
1395 	/* Prepare RepeaterAuth_Stream_Manage msg */
1396 	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1397 	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1398 
1399 	/* K no of streams is fixed as 1. Stored as big-endian. */
1400 	msgs.stream_manage.k = cpu_to_be16(1);
1401 
1402 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1403 	msgs.stream_manage.streams[0].stream_id = 0;
1404 	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1405 
1406 	/* Send it to Repeater */
1407 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
1408 				  sizeof(msgs.stream_manage));
1409 	if (ret < 0)
1410 		return ret;
1411 
1412 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
1413 				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1414 	if (ret < 0)
1415 		return ret;
1416 
1417 	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1418 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1419 
1420 	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1421 	if (ret < 0)
1422 		return ret;
1423 
1424 	hdcp->seq_num_m++;
1425 
1426 	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1427 		DRM_DEBUG_KMS("seq_num_m roll over.\n");
1428 		return -1;
1429 	}
1430 
1431 	return 0;
1432 }
1433 
/*
 * Repeater topology authentication: read the ReceiverID_List, sanity
 * check topology limits and seq_num_v, reject revoked receiver IDs,
 * have the firmware verify V' and send the resulting ack back.
 *
 * Returns 0 on success, -EINVAL on topology/seq_num_v problems,
 * -EPERM when a revoked ID is present, or a messaging error code.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/*
	 * NOTE(review): only a decrease is rejected here; an equal
	 * (non-incremented) seq_num_v is accepted — confirm against the
	 * HDCP2.2 spec's requirement on repeater seq_num_v updates.
	 */
	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		DRM_DEBUG_KMS("Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt)) {
		DRM_ERROR("Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Commit seq_num_v only after the topology verified cleanly. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1495 
/* Repeater auth: verify topology first, then propagate stream management. */
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret = hdcp2_authenticate_repeater_topology(connector);

	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1506 
1507 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1508 {
1509 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
1510 	struct intel_hdcp *hdcp = &connector->hdcp;
1511 	const struct intel_hdcp_shim *shim = hdcp->shim;
1512 	int ret;
1513 
1514 	ret = hdcp2_authentication_key_exchange(connector);
1515 	if (ret < 0) {
1516 		DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
1517 		return ret;
1518 	}
1519 
1520 	ret = hdcp2_locality_check(connector);
1521 	if (ret < 0) {
1522 		DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
1523 		return ret;
1524 	}
1525 
1526 	ret = hdcp2_session_key_exchange(connector);
1527 	if (ret < 0) {
1528 		DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
1529 		return ret;
1530 	}
1531 
1532 	if (shim->config_stream_type) {
1533 		ret = shim->config_stream_type(intel_dig_port,
1534 					       hdcp->is_repeater,
1535 					       hdcp->content_type);
1536 		if (ret < 0)
1537 			return ret;
1538 	}
1539 
1540 	if (hdcp->is_repeater) {
1541 		ret = hdcp2_authenticate_repeater(connector);
1542 		if (ret < 0) {
1543 			DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
1544 			return ret;
1545 		}
1546 	}
1547 
1548 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1549 	ret = hdcp2_authenticate_port(connector);
1550 	if (ret < 0)
1551 		return ret;
1552 
1553 	return ret;
1554 }
1555 
/*
 * Turn on HDCP2.2 link encryption for an already-authenticated link:
 * optionally raise HDCP signalling via the shim, request encryption
 * through HDCP2_CTL, and wait for the status bit to latch.
 *
 * Returns 0 on success or a negative error code (e.g. -ETIMEDOUT when
 * the encryption-status bit never sets).
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be active when we get here. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	/* Wait for the hardware to report encryption active. */
	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1594 
/*
 * Turn off HDCP2.2 link encryption: clear the encryption request in
 * HDCP2_CTL, wait for the status bit to drop, then optionally lower
 * HDCP signalling via the shim.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be active when disabling. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	/*
	 * NOTE(review): a -ETIMEDOUT from the wait above is overwritten by
	 * the toggle_signalling() result (0 on success) below — confirm the
	 * timeout is intentionally treated as best-effort here.
	 */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1630 
/*
 * Attempt the full sink authentication up to three times, tearing down
 * the mei session after every failed try.  After a successful attempt,
 * honour the spec's minimum SKE-to-encryption delay before enabling
 * link encryption.  Returns 0 on success or the last failure's code.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
			      i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			DRM_DEBUG_KMS("Port deauth failed.\n");
	}

	/* i != tries means an authentication attempt succeeded. */
	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				DRM_DEBUG_KMS("Port deauth failed.\n");
		}
	}

	return ret;
}
1663 
/*
 * Enable HDCP2.2 on this connector: authenticate (with retries) and
 * switch on encryption, then record the encrypted state.  Caller holds
 * hdcp->mutex.  Returns 0 on success.
 */
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		      connector->base.name, connector->base.base.id,
		      hdcp->content_type);

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		DRM_DEBUG_KMS("HDCP2 Type%d  Enabling Failed. (%d)\n",
			      hdcp->content_type, ret);
		return ret;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
		      connector->base.name, connector->base.base.id,
		      hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
1687 
/*
 * Disable HDCP2.2 on this connector: stop encryption and close the mei
 * session (best effort), then clear the encrypted state.  Caller holds
 * hdcp->mutex.  Returns the result of the encryption disable.
 */
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
	int ret;

	DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
		      connector->base.name, connector->base.base.id);

	ret = hdcp2_disable_encryption(connector);

	/* Session teardown failure is logged but does not change ret. */
	if (hdcp2_deauthenticate_port(connector) < 0)
		DRM_DEBUG_KMS("Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;

	return ret;
}
1704 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Called from the periodic check worker while 2.2 encryption is active.
 * Verifies the hardware still reports HDCP2.2 in use, queries the shim's
 * link check, handles a downstream topology change by re-running the
 * repeater topology authentication, and on any other link failure tears
 * down and re-enables HDCP2.2.  Content-protection property updates are
 * pushed through hdcp->prop_work.
 *
 * Returns 0 when the link is (or was restored to) protected, a negative
 * error code otherwise.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware dropping encryption behind our back is a WARN case. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(intel_dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full disable/enable cycle to recover the link. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1792 
1793 static void intel_hdcp_check_work(struct work_struct *work)
1794 {
1795 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1796 					       struct intel_hdcp,
1797 					       check_work);
1798 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1799 
1800 	if (!intel_hdcp2_check_link(connector))
1801 		schedule_delayed_work(&hdcp->check_work,
1802 				      DRM_HDCP2_CHECK_PERIOD_MS);
1803 	else if (!intel_hdcp_check_link(connector))
1804 		schedule_delayed_work(&hdcp->check_work,
1805 				      DRM_HDCP_CHECK_PERIOD_MS);
1806 }
1807 
1808 static int i915_hdcp_component_bind(struct device *i915_kdev,
1809 				    struct device *mei_kdev, void *data)
1810 {
1811 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1812 
1813 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
1814 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1815 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1816 	dev_priv->hdcp_master->mei_dev = mei_kdev;
1817 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1818 
1819 	return 0;
1820 }
1821 
/* Component unbind callback: the mei device is going away; drop our ref. */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = NULL;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
}
1832 
/* Ops passed to the component framework for the mei_hdcp pairing. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1837 
1838 static inline
1839 enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
1840 {
1841 	switch (port) {
1842 	case PORT_A:
1843 		return MEI_DDI_A;
1844 	case PORT_B ... PORT_F:
1845 		return (enum mei_fw_ddi)port;
1846 	default:
1847 		return MEI_DDI_INVALID_PORT;
1848 	}
1849 }
1850 
1851 static inline
1852 enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
1853 {
1854 	switch (cpu_transcoder) {
1855 	case TRANSCODER_A ... TRANSCODER_D:
1856 		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
1857 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
1858 		return MEI_INVALID_TRANSCODER;
1859 	}
1860 }
1861 
1862 static inline int initialize_hdcp_port_data(struct intel_connector *connector,
1863 					    const struct intel_hdcp_shim *shim)
1864 {
1865 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1866 	struct intel_hdcp *hdcp = &connector->hdcp;
1867 	struct hdcp_port_data *data = &hdcp->port_data;
1868 
1869 	if (INTEL_GEN(dev_priv) < 12)
1870 		data->fw_ddi =
1871 			intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
1872 	else
1873 		/*
1874 		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
1875 		 * with zero(INVALID PORT index).
1876 		 */
1877 		data->fw_ddi = MEI_DDI_INVALID_PORT;
1878 
1879 	/*
1880 	 * As associated transcoder is set and modified at modeset, here fw_tc
1881 	 * is initialized to zero (invalid transcoder index). This will be
1882 	 * retained for <Gen12 forever.
1883 	 */
1884 	data->fw_tc = MEI_INVALID_TRANSCODER;
1885 
1886 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
1887 	data->protocol = (u8)shim->protocol;
1888 
1889 	data->k = 1;
1890 	if (!data->streams)
1891 		data->streams = kcalloc(data->k,
1892 					sizeof(struct hdcp2_streamid_type),
1893 					GFP_KERNEL);
1894 	if (!data->streams) {
1895 		drm_err(&dev_priv->drm, "Out of Memory\n");
1896 		return -ENOMEM;
1897 	}
1898 
1899 	data->streams[0].stream_id = 0;
1900 	data->streams[0].stream_type = hdcp->content_type;
1901 
1902 	return 0;
1903 }
1904 
1905 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1906 {
1907 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1908 		return false;
1909 
1910 	return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
1911 		IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
1912 }
1913 
/*
 * Register the i915 side of the i915/mei_hdcp component pair.  No-op on
 * platforms without HDCP2.2 support.  hdcp_comp_added is set before
 * component_add_typed() and rolled back on failure, both under
 * hdcp_comp_mutex.
 */
void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
{
	int ret;

	if (!is_hdcp2_supported(dev_priv))
		return;

	mutex_lock(&dev_priv->hdcp_comp_mutex);
	/* Double registration would indicate a driver bug. */
	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);

	dev_priv->hdcp_comp_added = true;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
				  I915_COMPONENT_HDCP);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
			    ret);
		mutex_lock(&dev_priv->hdcp_comp_mutex);
		dev_priv->hdcp_comp_added = false;
		mutex_unlock(&dev_priv->hdcp_comp_mutex);
		return;
	}
}
1937 
1938 static void intel_hdcp2_init(struct intel_connector *connector,
1939 			     const struct intel_hdcp_shim *shim)
1940 {
1941 	struct intel_hdcp *hdcp = &connector->hdcp;
1942 	int ret;
1943 
1944 	ret = initialize_hdcp_port_data(connector, shim);
1945 	if (ret) {
1946 		DRM_DEBUG_KMS("Mei hdcp data init failed\n");
1947 		return;
1948 	}
1949 
1950 	hdcp->hdcp2_supported = true;
1951 }
1952 
/*
 * Per-connector HDCP initialization: optionally set up HDCP2.2, attach
 * the content-protection property and initialize the lock, workers and
 * CP_IRQ wait queue.
 *
 * Returns 0 on success, -EINVAL when no shim is provided, or the
 * property-attach error (in which case 2.2 state is rolled back).
 */
int intel_hdcp_init(struct intel_connector *connector,
		    const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	if (!shim)
		return -EINVAL;

	if (is_hdcp2_supported(dev_priv))
		intel_hdcp2_init(connector, shim);

	ret =
	drm_connector_attach_content_protection_property(&connector->base,
							 hdcp->hdcp2_supported);
	if (ret) {
		/* Roll back the 2.2 setup done above. */
		hdcp->hdcp2_supported = false;
		kfree(hdcp->port_data.streams);
		return ret;
	}

	hdcp->shim = shim;
	mutex_init(&hdcp->mutex);
	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
	init_waitqueue_head(&hdcp->cp_irq_queue);

	return 0;
}
1983 
/*
 * Enable content protection on a connector: try HDCP2.2 first and, when
 * that fails and Type1 content is not required, fall back to HDCP1.4.
 * On success the periodic link-check worker is armed and the
 * content-protection property is flipped to ENABLED.
 *
 * Returns 0 on success, -ENOENT when the connector has no HDCP shim,
 * or the enable error code.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/* Gen12+: the firmware addresses the port by its transcoder. */
	if (INTEL_GEN(dev_priv) >= 12) {
		hdcp->cpu_transcoder = cpu_transcoder;
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
	}

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		schedule_work(&hdcp->prop_work);
	}

	mutex_unlock(&hdcp->mutex);
	return ret;
}
2033 
2034 int intel_hdcp_disable(struct intel_connector *connector)
2035 {
2036 	struct intel_hdcp *hdcp = &connector->hdcp;
2037 	int ret = 0;
2038 
2039 	if (!hdcp->shim)
2040 		return -ENOENT;
2041 
2042 	mutex_lock(&hdcp->mutex);
2043 
2044 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2045 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
2046 		if (hdcp->hdcp2_encrypted)
2047 			ret = _intel_hdcp2_disable(connector);
2048 		else if (hdcp->hdcp_encrypted)
2049 			ret = _intel_hdcp_disable(connector);
2050 	}
2051 
2052 	mutex_unlock(&hdcp->mutex);
2053 	cancel_delayed_work_sync(&hdcp->check_work);
2054 	return ret;
2055 }
2056 
/*
 * Apply the connector state's content-protection request during a modeset:
 * disable HDCP when it becomes undesired or its content type changed, then
 * (re-)enable it with the new transcoder/type when it is desired. The
 * disable -> mark-DESIRED -> enable ordering below is deliberate and must
 * be preserved.
 */
void intel_hdcp_update_pipe(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/*
	 * A type change only matters while protection is still wanted; a
	 * plain UNDESIRED request is handled as a simple disable below.
	 */
	bool content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	/* Re-enable for a type change, or first enable for a DESIRED state. */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
	    content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}
2096 
2097 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2098 {
2099 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2100 	if (!dev_priv->hdcp_comp_added) {
2101 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2102 		return;
2103 	}
2104 
2105 	dev_priv->hdcp_comp_added = false;
2106 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2107 
2108 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2109 }
2110 
2111 void intel_hdcp_cleanup(struct intel_connector *connector)
2112 {
2113 	if (!connector->hdcp.shim)
2114 		return;
2115 
2116 	mutex_lock(&connector->hdcp.mutex);
2117 	kfree(connector->hdcp.port_data.streams);
2118 	mutex_unlock(&connector->hdcp.mutex);
2119 }
2120 
2121 void intel_hdcp_atomic_check(struct drm_connector *connector,
2122 			     struct drm_connector_state *old_state,
2123 			     struct drm_connector_state *new_state)
2124 {
2125 	u64 old_cp = old_state->content_protection;
2126 	u64 new_cp = new_state->content_protection;
2127 	struct drm_crtc_state *crtc_state;
2128 
2129 	if (!new_state->crtc) {
2130 		/*
2131 		 * If the connector is being disabled with CP enabled, mark it
2132 		 * desired so it's re-enabled when the connector is brought back
2133 		 */
2134 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2135 			new_state->content_protection =
2136 				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2137 		return;
2138 	}
2139 
2140 	/*
2141 	 * Nothing to do if the state didn't change, or HDCP was activated since
2142 	 * the last commit. And also no change in hdcp content type.
2143 	 */
2144 	if (old_cp == new_cp ||
2145 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2146 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2147 		if (old_state->hdcp_content_type ==
2148 				new_state->hdcp_content_type)
2149 			return;
2150 	}
2151 
2152 	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2153 						   new_state->crtc);
2154 	crtc_state->mode_changed = true;
2155 }
2156 
2157 /* Handles the CP_IRQ raised from the DP HDCP sink */
2158 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2159 {
2160 	struct intel_hdcp *hdcp = &connector->hdcp;
2161 
2162 	if (!hdcp->shim)
2163 		return;
2164 
2165 	atomic_inc(&connector->hdcp.cp_irq_count);
2166 	wake_up_all(&connector->hdcp.cp_irq_queue);
2167 
2168 	schedule_delayed_work(&hdcp->check_work, 0);
2169 }
2170