xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_hdcp.c (revision f97cee494dc92395a668445bcd24d34c89f4ff8c)
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright © 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_reg.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_hdcp.h"
22 #include "intel_sideband.h"
23 #include "intel_connector.h"
24 
25 #define KEY_LOAD_TRIES	5
26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
27 #define HDCP2_LC_RETRY_CNT			3
28 
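/*
 * Illustrative example (not from the spec text): hweight8() counts the set
 * bits in each byte, so a KSV such as { 0xa5, 0x5a, 0x3c, 0xc3, 0x0f }
 * sums to 4 + 4 + 4 + 4 + 4 = 20 ones out of 40 bits and is accepted by
 * the check below, while an all-zero or all-ones KSV is rejected.
 */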
29 static
30 bool intel_hdcp_is_ksv_valid(u8 *ksv)
31 {
32 	int i, ones = 0;
33 	/* KSV has 20 1's and 20 0's */
34 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
35 		ones += hweight8(ksv[i]);
36 	if (ones != 20)
37 		return false;
38 
39 	return true;
40 }
41 
42 static
43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
44 			       const struct intel_hdcp_shim *shim, u8 *bksv)
45 {
46 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
47 	int ret, i, tries = 2;
48 
49 	/* HDCP spec states that we must retry the bksv if it is invalid */
50 	for (i = 0; i < tries; i++) {
51 		ret = shim->read_bksv(dig_port, bksv);
52 		if (ret)
53 			return ret;
54 		if (intel_hdcp_is_ksv_valid(bksv))
55 			break;
56 	}
57 	if (i == tries) {
58 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
59 		return -ENODEV;
60 	}
61 
62 	return 0;
63 }
64 
65 /* Are the platform and the sink HDCP1.4 capable? */
66 bool intel_hdcp_capable(struct intel_connector *connector)
67 {
68 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
69 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
70 	bool capable = false;
71 	u8 bksv[5];
72 
73 	if (!shim)
74 		return capable;
75 
76 	if (shim->hdcp_capable) {
77 		shim->hdcp_capable(dig_port, &capable);
78 	} else {
79 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
80 			capable = true;
81 	}
82 
83 	return capable;
84 }
85 
86 /* Are the platform and the sink HDCP2.2 capable? */
87 bool intel_hdcp2_capable(struct intel_connector *connector)
88 {
89 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
90 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
91 	struct intel_hdcp *hdcp = &connector->hdcp;
92 	bool capable = false;
93 
94 	/* I915 support for HDCP2.2 */
95 	if (!hdcp->hdcp2_supported)
96 		return false;
97 
98 	/* The MEI component interface must be bound and usable */
99 	mutex_lock(&dev_priv->hdcp_comp_mutex);
100 	if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
101 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
102 		return false;
103 	}
104 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
105 
106 	/* Sink's capability for HDCP2.2 */
107 	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
108 
109 	return capable;
110 }
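
/*
 * Sketch of the intended use of the two capability checks above
 * (simplified; the full logic lives in the HDCP enable path):
 *
 *	if (intel_hdcp2_capable(connector))
 *		ret = _intel_hdcp2_enable(connector);
 *	else if (intel_hdcp_capable(connector))
 *		ret = _intel_hdcp_enable(connector);
 *
 * i.e. HDCP2.2 is preferred over HDCP1.4 whenever both the platform and
 * the sink support it.
 */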
111 
112 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
113 			      enum transcoder cpu_transcoder, enum port port)
114 {
115 	return intel_de_read(dev_priv,
116 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
117 	       HDCP_STATUS_ENC;
118 }
119 
120 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
121 			       enum transcoder cpu_transcoder, enum port port)
122 {
123 	return intel_de_read(dev_priv,
124 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
125 	       LINK_ENCRYPTION_STATUS;
126 }
127 
128 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
129 				    const struct intel_hdcp_shim *shim)
130 {
131 	int ret, read_ret;
132 	bool ksv_ready;
133 
134 	/* Poll for ksv list ready (spec says max time allowed is 5s) */
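	/*
	 * The three trailing __wait_for() arguments below are microseconds:
	 * a 5s total budget, a 1ms initial poll interval, and a 100ms cap on
	 * the exponentially backed-off interval.
	 */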
135 	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
136 							 &ksv_ready),
137 			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
138 			 100 * 1000);
139 	if (ret)
140 		return ret;
141 	if (read_ret)
142 		return read_ret;
143 	if (!ksv_ready)
144 		return -ETIMEDOUT;
145 
146 	return 0;
147 }
148 
149 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
150 {
151 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
152 	struct i915_power_well *power_well;
153 	enum i915_power_well_id id;
154 	bool enabled = false;
155 
156 	/*
157 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
158 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
159 	 */
160 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
161 		id = HSW_DISP_PW_GLOBAL;
162 	else
163 		id = SKL_DISP_PW_1;
164 
165 	mutex_lock(&power_domains->lock);
166 
167 	/* PG1 (power well #1) needs to be enabled */
168 	for_each_power_well(dev_priv, power_well) {
169 		if (power_well->desc->id == id) {
170 			enabled = power_well->desc->ops->is_enabled(dev_priv,
171 								    power_well);
172 			break;
173 		}
174 	}
175 	mutex_unlock(&power_domains->lock);
176 
177 	/*
178 	 * Another requirement for HDCP key loadability is that the PLL for
179 	 * CDCLK is enabled. We won't land here without an active CRTC, so we
180 	 * assume that CDCLK is already on.
181 	 */
182 
183 	return enabled;
184 }
185 
186 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
187 {
188 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
189 	intel_de_write(dev_priv, HDCP_KEY_STATUS,
190 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
191 }
192 
193 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
194 {
195 	int ret;
196 	u32 val;
197 
198 	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
199 	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
200 		return 0;
201 
202 	/*
203 	 * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
204 	 * out of reset. So if the key is not already loaded, it's an error state.
205 	 */
206 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
207 		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
208 			return -ENXIO;
209 
210 	/*
211 	 * Initiate loading the HDCP key from fuses.
212 	 *
213 	 * On BXT+ platforms the HDCP key must be loaded by SW. Among Gen 9
214 	 * platforms, only those other than BXT and GLK (i.e. GEN9_BC) differ
215 	 * in the key load trigger: they use the GT Driver Mailbox interface.
216 	 */
217 	if (IS_GEN9_BC(dev_priv)) {
218 		ret = sandybridge_pcode_write(dev_priv,
219 					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
220 		if (ret) {
221 			drm_err(&dev_priv->drm,
222 				"Failed to initiate HDCP key load (%d)\n",
223 				ret);
224 			return ret;
225 		}
226 	} else {
227 		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
228 	}
229 
230 	/* Wait for the keys to load (500us) */
231 	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
232 					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
233 					10, 1, &val);
234 	if (ret)
235 		return ret;
236 	else if (!(val & HDCP_KEY_LOAD_STATUS))
237 		return -ENXIO;
238 
239 	/* Send Aksv over to PCH display for use in authentication */
240 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
241 
242 	return 0;
243 }
244 
245 /* Write the next 32 bits of the SHA-1 stream; returns 0 or -ETIMEDOUT */
246 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
247 {
248 	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
249 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
250 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
251 		return -ETIMEDOUT;
252 	}
253 	return 0;
254 }
255 
256 static
257 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
258 				enum transcoder cpu_transcoder, enum port port)
259 {
260 	if (INTEL_GEN(dev_priv) >= 12) {
261 		switch (cpu_transcoder) {
262 		case TRANSCODER_A:
263 			return HDCP_TRANSA_REP_PRESENT |
264 			       HDCP_TRANSA_SHA1_M0;
265 		case TRANSCODER_B:
266 			return HDCP_TRANSB_REP_PRESENT |
267 			       HDCP_TRANSB_SHA1_M0;
268 		case TRANSCODER_C:
269 			return HDCP_TRANSC_REP_PRESENT |
270 			       HDCP_TRANSC_SHA1_M0;
271 		case TRANSCODER_D:
272 			return HDCP_TRANSD_REP_PRESENT |
273 			       HDCP_TRANSD_SHA1_M0;
274 		default:
275 			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
276 				cpu_transcoder);
277 			return -EINVAL;
278 		}
279 	}
280 
281 	switch (port) {
282 	case PORT_A:
283 		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
284 	case PORT_B:
285 		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
286 	case PORT_C:
287 		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
288 	case PORT_D:
289 		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
290 	case PORT_E:
291 		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
292 	default:
293 		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
294 		return -EINVAL;
295 	}
296 }
297 
298 static
299 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
300 				const struct intel_hdcp_shim *shim,
301 				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
302 {
303 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
304 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
305 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
306 	enum port port = dig_port->base.port;
307 	u32 vprime, sha_text, sha_leftovers, rep_ctl;
308 	int ret, i, j, sha_idx;
309 
310 	/* Process V' values from the receiver */
311 	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
312 		ret = shim->read_v_prime_part(dig_port, i, &vprime);
313 		if (ret)
314 			return ret;
315 		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
316 	}
317 
318 	/*
319 	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
320 	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
321 	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
322 	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
323 	 * index will keep track of our progress through the 64 bytes as well as
324 	 * helping us work the 40-bit KSVs through our 32-bit register.
325 	 *
326 	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
327 	 */
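	/*
	 * Worked example of the packing below: with no leftovers, the first
	 * 32-bit write for a KSV carries ksv[0..3] big-endian; ksv[4] then
	 * stays behind in the top byte of sha_text (sha_leftovers == 1) and
	 * is completed by the next KSV's bytes on the following iteration.
	 */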
328 	sha_idx = 0;
329 	sha_text = 0;
330 	sha_leftovers = 0;
331 	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
332 	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
333 	for (i = 0; i < num_downstream; i++) {
334 		unsigned int sha_empty;
335 		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
336 
337 		/* Fill up the empty slots in sha_text and write it out */
338 		sha_empty = sizeof(sha_text) - sha_leftovers;
339 		for (j = 0; j < sha_empty; j++) {
340 			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
341 			sha_text |= ksv[j] << off;
342 		}
343 
344 		ret = intel_write_sha_text(dev_priv, sha_text);
345 		if (ret < 0)
346 			return ret;
347 
348 		/* Programming guide writes this every 64 bytes */
349 		sha_idx += sizeof(sha_text);
350 		if (!(sha_idx % 64))
351 			intel_de_write(dev_priv, HDCP_REP_CTL,
352 				       rep_ctl | HDCP_SHA1_TEXT_32);
353 
354 		/* Store the leftover bytes from the ksv in sha_text */
355 		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
356 		sha_text = 0;
357 		for (j = 0; j < sha_leftovers; j++)
358 			sha_text |= ksv[sha_empty + j] <<
359 					((sizeof(sha_text) - j - 1) * 8);
360 
361 		/*
362 		 * If we still have room in sha_text for more data, continue.
363 		 * Otherwise, write it out immediately.
364 		 */
365 		if (sizeof(sha_text) > sha_leftovers)
366 			continue;
367 
368 		ret = intel_write_sha_text(dev_priv, sha_text);
369 		if (ret < 0)
370 			return ret;
371 		sha_leftovers = 0;
372 		sha_text = 0;
373 		sha_idx += sizeof(sha_text);
374 	}
375 
376 	/*
377 	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
378 	 * bytes are leftover from the last ksv, we might be able to fit them
379 	 * all in sha_text (first 2 cases), or we might need to split them up
380 	 * into 2 writes (last 2 cases).
381 	 */
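	/*
	 * Note (derived from the loop above): sha_leftovers here is just
	 * (5 * num_downstream) % 4, since each KSV contributes 5 bytes to a
	 * 4-byte register. E.g. 3 downstream devices leave 15 % 4 = 3 bytes
	 * pending in sha_text before BSTATUS and the 8 bytes of M0 go out.
	 */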
382 	if (sha_leftovers == 0) {
383 		/* Write 16 bits of text, 16 bits of M0 */
384 		intel_de_write(dev_priv, HDCP_REP_CTL,
385 			       rep_ctl | HDCP_SHA1_TEXT_16);
386 		ret = intel_write_sha_text(dev_priv,
387 					   bstatus[0] << 8 | bstatus[1]);
388 		if (ret < 0)
389 			return ret;
390 		sha_idx += sizeof(sha_text);
391 
392 		/* Write 32 bits of M0 */
393 		intel_de_write(dev_priv, HDCP_REP_CTL,
394 			       rep_ctl | HDCP_SHA1_TEXT_0);
395 		ret = intel_write_sha_text(dev_priv, 0);
396 		if (ret < 0)
397 			return ret;
398 		sha_idx += sizeof(sha_text);
399 
400 		/* Write 16 bits of M0 */
401 		intel_de_write(dev_priv, HDCP_REP_CTL,
402 			       rep_ctl | HDCP_SHA1_TEXT_16);
403 		ret = intel_write_sha_text(dev_priv, 0);
404 		if (ret < 0)
405 			return ret;
406 		sha_idx += sizeof(sha_text);
407 
408 	} else if (sha_leftovers == 1) {
409 		/* Write 24 bits of text, 8 bits of M0 */
410 		intel_de_write(dev_priv, HDCP_REP_CTL,
411 			       rep_ctl | HDCP_SHA1_TEXT_24);
412 		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
413 		/* Only 24-bits of data, must be in the LSB */
414 		sha_text = (sha_text & 0xffffff00) >> 8;
415 		ret = intel_write_sha_text(dev_priv, sha_text);
416 		if (ret < 0)
417 			return ret;
418 		sha_idx += sizeof(sha_text);
419 
420 		/* Write 32 bits of M0 */
421 		intel_de_write(dev_priv, HDCP_REP_CTL,
422 			       rep_ctl | HDCP_SHA1_TEXT_0);
423 		ret = intel_write_sha_text(dev_priv, 0);
424 		if (ret < 0)
425 			return ret;
426 		sha_idx += sizeof(sha_text);
427 
428 		/* Write 24 bits of M0 */
429 		intel_de_write(dev_priv, HDCP_REP_CTL,
430 			       rep_ctl | HDCP_SHA1_TEXT_8);
431 		ret = intel_write_sha_text(dev_priv, 0);
432 		if (ret < 0)
433 			return ret;
434 		sha_idx += sizeof(sha_text);
435 
436 	} else if (sha_leftovers == 2) {
437 		/* Write 32 bits of text */
438 		intel_de_write(dev_priv, HDCP_REP_CTL,
439 			       rep_ctl | HDCP_SHA1_TEXT_32);
440 		sha_text |= bstatus[0] << 8 | bstatus[1];
441 		ret = intel_write_sha_text(dev_priv, sha_text);
442 		if (ret < 0)
443 			return ret;
444 		sha_idx += sizeof(sha_text);
445 
446 		/* Write 64 bits of M0 */
447 		intel_de_write(dev_priv, HDCP_REP_CTL,
448 			       rep_ctl | HDCP_SHA1_TEXT_0);
449 		for (i = 0; i < 2; i++) {
450 			ret = intel_write_sha_text(dev_priv, 0);
451 			if (ret < 0)
452 				return ret;
453 			sha_idx += sizeof(sha_text);
454 		}
455 
456 		/*
457 		 * Terminate the SHA-1 stream by hand. For the other leftover
458 		 * cases this is appended by the hardware.
459 		 */
460 		intel_de_write(dev_priv, HDCP_REP_CTL,
461 			       rep_ctl | HDCP_SHA1_TEXT_32);
462 		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
463 		ret = intel_write_sha_text(dev_priv, sha_text);
464 		if (ret < 0)
465 			return ret;
466 		sha_idx += sizeof(sha_text);
467 	} else if (sha_leftovers == 3) {
468 		/* Write 32 bits of text (filled from LSB) */
469 		intel_de_write(dev_priv, HDCP_REP_CTL,
470 			       rep_ctl | HDCP_SHA1_TEXT_32);
471 		sha_text |= bstatus[0];
472 		ret = intel_write_sha_text(dev_priv, sha_text);
473 		if (ret < 0)
474 			return ret;
475 		sha_idx += sizeof(sha_text);
476 
477 		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
478 		intel_de_write(dev_priv, HDCP_REP_CTL,
479 			       rep_ctl | HDCP_SHA1_TEXT_8);
480 		ret = intel_write_sha_text(dev_priv, bstatus[1]);
481 		if (ret < 0)
482 			return ret;
483 		sha_idx += sizeof(sha_text);
484 
485 		/* Write 32 bits of M0 */
486 		intel_de_write(dev_priv, HDCP_REP_CTL,
487 			       rep_ctl | HDCP_SHA1_TEXT_0);
488 		ret = intel_write_sha_text(dev_priv, 0);
489 		if (ret < 0)
490 			return ret;
491 		sha_idx += sizeof(sha_text);
492 
493 		/* Write 8 bits of M0 */
494 		intel_de_write(dev_priv, HDCP_REP_CTL,
495 			       rep_ctl | HDCP_SHA1_TEXT_24);
496 		ret = intel_write_sha_text(dev_priv, 0);
497 		if (ret < 0)
498 			return ret;
499 		sha_idx += sizeof(sha_text);
500 	} else {
501 		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
502 			    sha_leftovers);
503 		return -EINVAL;
504 	}
505 
506 	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
507 	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
508 	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
509 		ret = intel_write_sha_text(dev_priv, 0);
510 		if (ret < 0)
511 			return ret;
512 		sha_idx += sizeof(sha_text);
513 	}
514 
515 	/*
516 	 * Last write gets the length of the concatenation in bits. That is:
517 	 *  - 5 bytes per device
518 	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
519 	 */
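	/* e.g. 3 downstream devices: (3 * 5 + 10) * 8 = 200 bits */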
520 	sha_text = (num_downstream * 5 + 10) * 8;
521 	ret = intel_write_sha_text(dev_priv, sha_text);
522 	if (ret < 0)
523 		return ret;
524 
525 	/* Tell the HW we're done with the hash and wait for it to ACK */
526 	intel_de_write(dev_priv, HDCP_REP_CTL,
527 		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
528 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
529 				  HDCP_SHA1_COMPLETE, 1)) {
530 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
531 		return -ETIMEDOUT;
532 	}
533 	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
534 		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
535 		return -ENXIO;
536 	}
537 
538 	return 0;
539 }
540 
541 /* Implements Part 2 of the HDCP authorization procedure */
542 static
543 int intel_hdcp_auth_downstream(struct intel_connector *connector)
544 {
545 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
546 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
547 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
548 	u8 bstatus[2], num_downstream, *ksv_fifo;
549 	int ret, i, tries = 3;
550 
551 	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
552 	if (ret) {
553 		drm_dbg_kms(&dev_priv->drm,
554 			    "KSV list failed to become ready (%d)\n", ret);
555 		return ret;
556 	}
557 
558 	ret = shim->read_bstatus(dig_port, bstatus);
559 	if (ret)
560 		return ret;
561 
562 	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
563 	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
564 		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
565 		return -EPERM;
566 	}
567 
568 	/*
569 	 * When a repeater reports a device count of 0, the HDCP1.4 spec
570 	 * allows disabling HDCP encryption, which implies the repeater can't
571 	 * have a display of its own. As no encrypted content can be consumed
572 	 * in a repeater with 0 downstream devices, we fail the
573 	 * authentication.
574 	 */
575 	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
576 	if (num_downstream == 0) {
577 		drm_dbg_kms(&dev_priv->drm,
578 			    "Repeater with zero downstream devices\n");
579 		return -EINVAL;
580 	}
581 
582 	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
583 	if (!ksv_fifo) {
584 		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
585 		return -ENOMEM;
586 	}
587 
588 	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
589 	if (ret)
590 		goto err;
591 
592 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
593 					num_downstream) > 0) {
594 		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
595 		ret = -EPERM;
596 		goto err;
597 	}
598 
599 	/*
600 	 * When V' mismatches, the DP spec mandates re-reading V' at least
601 	 * twice.
602 	 */
603 	for (i = 0; i < tries; i++) {
604 		ret = intel_hdcp_validate_v_prime(connector, shim,
605 						  ksv_fifo, num_downstream,
606 						  bstatus);
607 		if (!ret)
608 			break;
609 	}
610 
611 	if (i == tries) {
612 		drm_dbg_kms(&dev_priv->drm,
613 			    "V Prime validation failed.(%d)\n", ret);
614 		goto err;
615 	}
616 
617 	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
618 		    num_downstream);
619 	ret = 0;
620 err:
621 	kfree(ksv_fifo);
622 	return ret;
623 }
624 
625 /* Implements Part 1 of the HDCP authorization procedure */
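/*
 * Outline of Part 1 as implemented below:
 *
 *	1. Generate An; hand An/Aksv to the sink via the shim
 *	2. Read Bksv, validate it and check it against the revocation list
 *	3. Program Bksv and the repeater bit, enable HDCP signalling
 *	4. Start auth/encryption, wait for R0, compare the sink's Ri'
 *	5. Wait for the encryption status bit (then Part 2 for repeaters)
 */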
626 static int intel_hdcp_auth(struct intel_connector *connector)
627 {
628 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
629 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
630 	struct intel_hdcp *hdcp = &connector->hdcp;
631 	const struct intel_hdcp_shim *shim = hdcp->shim;
632 	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
633 	enum port port = dig_port->base.port;
634 	unsigned long r0_prime_gen_start;
635 	int ret, i, tries = 2;
636 	union {
637 		u32 reg[2];
638 		u8 shim[DRM_HDCP_AN_LEN];
639 	} an;
640 	union {
641 		u32 reg[2];
642 		u8 shim[DRM_HDCP_KSV_LEN];
643 	} bksv;
644 	union {
645 		u32 reg;
646 		u8 shim[DRM_HDCP_RI_LEN];
647 	} ri;
648 	bool repeater_present, hdcp_capable;
649 
650 	/*
651 	 * Detects whether the display is HDCP capable. Although we check for
652 	 * valid Bksv below, the HDCP over DP spec requires that we check
653 	 * whether the display supports HDCP before we write An. For HDMI
654 	 * displays, this is not necessary.
655 	 */
656 	if (shim->hdcp_capable) {
657 		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
658 		if (ret)
659 			return ret;
660 		if (!hdcp_capable) {
661 			drm_dbg_kms(&dev_priv->drm,
662 				    "Panel is not HDCP capable\n");
663 			return -EINVAL;
664 		}
665 	}
666 
667 	/* Initialize An with 2 random values and acquire it */
668 	for (i = 0; i < 2; i++)
669 		intel_de_write(dev_priv,
670 			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
671 			       get_random_u32());
672 	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
673 		       HDCP_CONF_CAPTURE_AN);
674 
675 	/* Wait for An to be acquired */
676 	if (intel_de_wait_for_set(dev_priv,
677 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
678 				  HDCP_STATUS_AN_READY, 1)) {
679 		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
680 		return -ETIMEDOUT;
681 	}
682 
683 	an.reg[0] = intel_de_read(dev_priv,
684 				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
685 	an.reg[1] = intel_de_read(dev_priv,
686 				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
687 	ret = shim->write_an_aksv(dig_port, an.shim);
688 	if (ret)
689 		return ret;
690 
691 	r0_prime_gen_start = jiffies;
692 
693 	memset(&bksv, 0, sizeof(bksv));
694 
695 	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
696 	if (ret < 0)
697 		return ret;
698 
699 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
700 		drm_err(&dev_priv->drm, "BKSV is revoked\n");
701 		return -EPERM;
702 	}
703 
704 	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
705 		       bksv.reg[0]);
706 	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
707 		       bksv.reg[1]);
708 
709 	ret = shim->repeater_present(dig_port, &repeater_present);
710 	if (ret)
711 		return ret;
712 	if (repeater_present)
713 		intel_de_write(dev_priv, HDCP_REP_CTL,
714 			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
715 
716 	ret = shim->toggle_signalling(dig_port, true);
717 	if (ret)
718 		return ret;
719 
720 	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
721 		       HDCP_CONF_AUTH_AND_ENC);
722 
723 	/* Wait for R0 ready */
724 	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
725 		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
726 		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
727 		return -ETIMEDOUT;
728 	}
729 
730 	/*
731 	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
732 	 * some monitors can take longer than this. We'll set the timeout at
733 	 * 300ms just to be sure.
734 	 *
735 	 * On DP, there's an R0_READY bit available but no such bit
736 	 * exists on HDMI. Since the upper-bound is the same, we'll just do
737 	 * the stupid thing instead of polling on one and not the other.
738 	 */
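	/*
	 * wait_remaining_ms_from_jiffies() only sleeps for whatever part of
	 * the 300ms has not already elapsed since r0_prime_gen_start was
	 * sampled (right after the An/Aksv write), so time spent in the
	 * intervening shim transfers is not added on top of the budget.
	 */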
739 	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
740 
741 	tries = 3;
742 
743 	/*
744 	 * The DP HDCP spec mandates up to two more attempts to re-read R0'
745 	 * in case of an R0 mismatch.
746 	 */
747 	for (i = 0; i < tries; i++) {
748 		ri.reg = 0;
749 		ret = shim->read_ri_prime(dig_port, ri.shim);
750 		if (ret)
751 			return ret;
752 		intel_de_write(dev_priv,
753 			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
754 			       ri.reg);
755 
756 		/* Wait for Ri prime match */
757 		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
758 			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
759 			break;
760 	}
761 
762 	if (i == tries) {
763 		drm_dbg_kms(&dev_priv->drm,
764 			    "Timed out waiting for Ri prime match (%x)\n",
765 			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
766 					  cpu_transcoder, port)));
767 		return -ETIMEDOUT;
768 	}
769 
770 	/* Wait for encryption confirmation */
771 	if (intel_de_wait_for_set(dev_priv,
772 				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
773 				  HDCP_STATUS_ENC,
774 				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
775 		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
776 		return -ETIMEDOUT;
777 	}
778 
779 	/*
780 	 * XXX: If we have MST-connected devices, we need to enable encryption
781 	 * on those as well.
782 	 */
783 
784 	if (repeater_present)
785 		return intel_hdcp_auth_downstream(connector);
786 
787 	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
788 	return 0;
789 }
790 
791 static int _intel_hdcp_disable(struct intel_connector *connector)
792 {
793 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
794 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
795 	struct intel_hdcp *hdcp = &connector->hdcp;
796 	enum port port = dig_port->base.port;
797 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
798 	u32 repeater_ctl;
799 	int ret;
800 
801 	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
802 		    connector->base.name, connector->base.base.id);
803 
804 	hdcp->hdcp_encrypted = false;
805 	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
806 	if (intel_de_wait_for_clear(dev_priv,
807 				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
808 				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
809 		drm_err(&dev_priv->drm,
810 			"Failed to disable HDCP, timeout clearing status\n");
811 		return -ETIMEDOUT;
812 	}
813 
814 	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
815 						   port);
816 	intel_de_write(dev_priv, HDCP_REP_CTL,
817 		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
818 
819 	ret = hdcp->shim->toggle_signalling(dig_port, false);
820 	if (ret) {
821 		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
822 		return ret;
823 	}
824 
825 	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
826 	return 0;
827 }
828 
829 static int _intel_hdcp_enable(struct intel_connector *connector)
830 {
831 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
832 	struct intel_hdcp *hdcp = &connector->hdcp;
833 	int i, ret, tries = 3;
834 
835 	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
836 		    connector->base.name, connector->base.base.id);
837 
838 	if (!hdcp_key_loadable(dev_priv)) {
839 		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
840 		return -ENXIO;
841 	}
842 
843 	for (i = 0; i < KEY_LOAD_TRIES; i++) {
844 		ret = intel_hdcp_load_keys(dev_priv);
845 		if (!ret)
846 			break;
847 		intel_hdcp_clear_keys(dev_priv);
848 	}
849 	if (ret) {
850 		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
851 			ret);
852 		return ret;
853 	}
854 
855 	/* In case of authentication failures, the HDCP spec expects reauth. */
856 	for (i = 0; i < tries; i++) {
857 		ret = intel_hdcp_auth(connector);
858 		if (!ret) {
859 			hdcp->hdcp_encrypted = true;
860 			return 0;
861 		}
862 
863 		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
864 
865 		/* Ensuring HDCP encryption and signalling are stopped. */
866 		_intel_hdcp_disable(connector);
867 	}
868 
869 	drm_dbg_kms(&dev_priv->drm,
870 		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
871 	return ret;
872 }
873 
874 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
875 {
876 	return container_of(hdcp, struct intel_connector, hdcp);
877 }
878 
879 /* Implements Part 3 of the HDCP authorization procedure */
880 static int intel_hdcp_check_link(struct intel_connector *connector)
881 {
882 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
883 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
884 	struct intel_hdcp *hdcp = &connector->hdcp;
885 	enum port port = dig_port->base.port;
886 	enum transcoder cpu_transcoder;
887 	int ret = 0;
888 
889 	mutex_lock(&hdcp->mutex);
890 	cpu_transcoder = hdcp->cpu_transcoder;
891 
892 	/* Check_link is only valid while HDCP1.4 is enabled */
893 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
894 	    !hdcp->hdcp_encrypted) {
895 		ret = -EINVAL;
896 		goto out;
897 	}
898 
899 	if (drm_WARN_ON(&dev_priv->drm,
900 			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
901 		drm_err(&dev_priv->drm,
902 			"%s:%d HDCP link stopped encryption,%x\n",
903 			connector->base.name, connector->base.base.id,
904 			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
905 		ret = -ENXIO;
906 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
907 		schedule_work(&hdcp->prop_work);
908 		goto out;
909 	}
910 
911 	if (hdcp->shim->check_link(dig_port)) {
912 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
913 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
914 			schedule_work(&hdcp->prop_work);
915 		}
916 		goto out;
917 	}
918 
919 	drm_dbg_kms(&dev_priv->drm,
920 		    "[%s:%d] HDCP link failed, retrying authentication\n",
921 		    connector->base.name, connector->base.base.id);
922 
923 	ret = _intel_hdcp_disable(connector);
924 	if (ret) {
925 		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
926 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
927 		schedule_work(&hdcp->prop_work);
928 		goto out;
929 	}
930 
931 	ret = _intel_hdcp_enable(connector);
932 	if (ret) {
933 		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
934 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
935 		schedule_work(&hdcp->prop_work);
936 		goto out;
937 	}
938 
939 out:
940 	mutex_unlock(&hdcp->mutex);
941 	return ret;
942 }
943 
944 static void intel_hdcp_prop_work(struct work_struct *work)
945 {
946 	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
947 					       prop_work);
948 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
949 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
950 
951 	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
952 	mutex_lock(&hdcp->mutex);
953 
954 	/*
955 	 * This worker is only used to flip between ENABLED/DESIRED. Either of
956 	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
957 	 * we're running just after hdcp has been disabled, so just exit.
958 	 */
959 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
960 		drm_hdcp_update_content_protection(&connector->base,
961 						   hdcp->value);
962 
963 	mutex_unlock(&hdcp->mutex);
964 	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
965 }
966 
967 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
968 {
969 	return INTEL_INFO(dev_priv)->display.has_hdcp &&
970 			(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
971 }
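
/*
 * Note on the check above (hedged summary): pre-Gen12 hardware does not
 * support HDCP on PORT_E, while Gen12+ moved HDCP to per-transcoder
 * registers (see intel_hdcp_get_repeater_ctl()), which lifts the port
 * restriction.
 */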
972 
973 static int
974 hdcp2_prepare_ake_init(struct intel_connector *connector,
975 		       struct hdcp2_ake_init *ake_data)
976 {
977 	struct hdcp_port_data *data = &connector->hdcp.port_data;
978 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
979 	struct i915_hdcp_comp_master *comp;
980 	int ret;
981 
982 	mutex_lock(&dev_priv->hdcp_comp_mutex);
983 	comp = dev_priv->hdcp_master;
984 
985 	if (!comp || !comp->ops) {
986 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
987 		return -EINVAL;
988 	}
989 
990 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
991 	if (ret)
992 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
993 			    ret);
994 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
995 
996 	return ret;
997 }
998 
999 static int
1000 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1001 				struct hdcp2_ake_send_cert *rx_cert,
1002 				bool *paired,
1003 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1004 				size_t *msg_sz)
1005 {
1006 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1007 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1008 	struct i915_hdcp_comp_master *comp;
1009 	int ret;
1010 
1011 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1012 	comp = dev_priv->hdcp_master;
1013 
1014 	if (!comp || !comp->ops) {
1015 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1016 		return -EINVAL;
1017 	}
1018 
1019 	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1020 							 rx_cert, paired,
1021 							 ek_pub_km, msg_sz);
1022 	if (ret < 0)
1023 		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1024 			    ret);
1025 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1026 
1027 	return ret;
1028 }
1029 
1030 static int hdcp2_verify_hprime(struct intel_connector *connector,
1031 			       struct hdcp2_ake_send_hprime *rx_hprime)
1032 {
1033 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1034 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1035 	struct i915_hdcp_comp_master *comp;
1036 	int ret;
1037 
1038 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1039 	comp = dev_priv->hdcp_master;
1040 
1041 	if (!comp || !comp->ops) {
1042 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1043 		return -EINVAL;
1044 	}
1045 
1046 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1047 	if (ret < 0)
1048 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1049 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1050 
1051 	return ret;
1052 }
1053 
1054 static int
1055 hdcp2_store_pairing_info(struct intel_connector *connector,
1056 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1057 {
1058 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1059 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1060 	struct i915_hdcp_comp_master *comp;
1061 	int ret;
1062 
1063 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1064 	comp = dev_priv->hdcp_master;
1065 
1066 	if (!comp || !comp->ops) {
1067 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1068 		return -EINVAL;
1069 	}
1070 
1071 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1072 	if (ret < 0)
1073 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1074 			    ret);
1075 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1076 
1077 	return ret;
1078 }
1079 
1080 static int
1081 hdcp2_prepare_lc_init(struct intel_connector *connector,
1082 		      struct hdcp2_lc_init *lc_init)
1083 {
1084 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1085 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1086 	struct i915_hdcp_comp_master *comp;
1087 	int ret;
1088 
1089 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1090 	comp = dev_priv->hdcp_master;
1091 
1092 	if (!comp || !comp->ops) {
1093 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1094 		return -EINVAL;
1095 	}
1096 
1097 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1098 	if (ret < 0)
1099 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1100 			    ret);
1101 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1102 
1103 	return ret;
1104 }
1105 
1106 static int
1107 hdcp2_verify_lprime(struct intel_connector *connector,
1108 		    struct hdcp2_lc_send_lprime *rx_lprime)
1109 {
1110 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1111 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1112 	struct i915_hdcp_comp_master *comp;
1113 	int ret;
1114 
1115 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1116 	comp = dev_priv->hdcp_master;
1117 
1118 	if (!comp || !comp->ops) {
1119 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1120 		return -EINVAL;
1121 	}
1122 
1123 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1124 	if (ret < 0)
1125 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1126 			    ret);
1127 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1128 
1129 	return ret;
1130 }
1131 
1132 static int hdcp2_prepare_skey(struct intel_connector *connector,
1133 			      struct hdcp2_ske_send_eks *ske_data)
1134 {
1135 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1136 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1137 	struct i915_hdcp_comp_master *comp;
1138 	int ret;
1139 
1140 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1141 	comp = dev_priv->hdcp_master;
1142 
1143 	if (!comp || !comp->ops) {
1144 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1145 		return -EINVAL;
1146 	}
1147 
1148 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1149 	if (ret < 0)
1150 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1151 			    ret);
1152 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1153 
1154 	return ret;
1155 }
1156 
1157 static int
1158 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1159 				      struct hdcp2_rep_send_receiverid_list
1160 								*rep_topology,
1161 				      struct hdcp2_rep_send_ack *rep_send_ack)
1162 {
1163 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1164 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1165 	struct i915_hdcp_comp_master *comp;
1166 	int ret;
1167 
1168 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1169 	comp = dev_priv->hdcp_master;
1170 
1171 	if (!comp || !comp->ops) {
1172 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1173 		return -EINVAL;
1174 	}
1175 
1176 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1177 							 rep_topology,
1178 							 rep_send_ack);
1179 	if (ret < 0)
1180 		drm_dbg_kms(&dev_priv->drm,
1181 			    "Verify rep topology failed. %d\n", ret);
1182 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1183 
1184 	return ret;
1185 }
1186 
1187 static int
1188 hdcp2_verify_mprime(struct intel_connector *connector,
1189 		    struct hdcp2_rep_stream_ready *stream_ready)
1190 {
1191 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1192 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1193 	struct i915_hdcp_comp_master *comp;
1194 	int ret;
1195 
1196 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1197 	comp = dev_priv->hdcp_master;
1198 
1199 	if (!comp || !comp->ops) {
1200 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1201 		return -EINVAL;
1202 	}
1203 
1204 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1205 	if (ret < 0)
1206 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1207 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1208 
1209 	return ret;
1210 }
1211 
1212 static int hdcp2_authenticate_port(struct intel_connector *connector)
1213 {
1214 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1215 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1216 	struct i915_hdcp_comp_master *comp;
1217 	int ret;
1218 
1219 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1220 	comp = dev_priv->hdcp_master;
1221 
1222 	if (!comp || !comp->ops) {
1223 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1224 		return -EINVAL;
1225 	}
1226 
1227 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1228 	if (ret < 0)
1229 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1230 			    ret);
1231 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1232 
1233 	return ret;
1234 }
1235 
1236 static int hdcp2_close_mei_session(struct intel_connector *connector)
1237 {
1238 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1239 	struct i915_hdcp_comp_master *comp;
1240 	int ret;
1241 
1242 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1243 	comp = dev_priv->hdcp_master;
1244 
1245 	if (!comp || !comp->ops) {
1246 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1247 		return -EINVAL;
1248 	}
1249 
1250 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1251 					     &connector->hdcp.port_data);
1252 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1253 
1254 	return ret;
1255 }
1256 
1257 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1258 {
1259 	return hdcp2_close_mei_session(connector);
1260 }
1261 
1262 /* Authentication flow starts from here */
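/*
 * AKE message sequence implemented below (direction relative to us):
 *
 *	AKE_Init              ->  sink
 *	AKE_Send_Cert         <-  sink
 *	AKE_No_Stored_km      ->  sink  (the buffer may instead carry
 *	                                 AKE_Stored_km, as prepared by the
 *	                                 MEI firmware)
 *	AKE_Send_H_prime      <-  sink
 *	AKE_Send_Pairing_Info <-  sink  (only when pairing is required)
 */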
1263 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1264 {
1265 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1266 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1267 	struct intel_hdcp *hdcp = &connector->hdcp;
1268 	union {
1269 		struct hdcp2_ake_init ake_init;
1270 		struct hdcp2_ake_send_cert send_cert;
1271 		struct hdcp2_ake_no_stored_km no_stored_km;
1272 		struct hdcp2_ake_send_hprime send_hprime;
1273 		struct hdcp2_ake_send_pairing_info pairing_info;
1274 	} msgs;
1275 	const struct intel_hdcp_shim *shim = hdcp->shim;
1276 	size_t size;
1277 	int ret;
1278 
1279 	/* Init for seq_num */
1280 	hdcp->seq_num_v = 0;
1281 	hdcp->seq_num_m = 0;
1282 
1283 	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1284 	if (ret < 0)
1285 		return ret;
1286 
1287 	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
1288 				  sizeof(msgs.ake_init));
1289 	if (ret < 0)
1290 		return ret;
1291 
1292 	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
1293 				 &msgs.send_cert, sizeof(msgs.send_cert));
1294 	if (ret < 0)
1295 		return ret;
1296 
1297 	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1298 		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps don't claim HDCP2.2\n");
1299 		return -EINVAL;
1300 	}
1301 
1302 	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1303 
1304 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1305 					msgs.send_cert.cert_rx.receiver_id,
1306 					1) > 0) {
1307 		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
1308 		return -EPERM;
1309 	}
1310 
1311 	/*
1312 	 * Here msgs.no_stored_km will also hold the message for the case
1313 	 * where km is already stored.
1314 	 */
1315 	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1316 					      &hdcp->is_paired,
1317 					      &msgs.no_stored_km, &size);
1318 	if (ret < 0)
1319 		return ret;
1320 
1321 	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
1322 	if (ret < 0)
1323 		return ret;
1324 
1325 	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1326 				 &msgs.send_hprime, sizeof(msgs.send_hprime));
1327 	if (ret < 0)
1328 		return ret;
1329 
1330 	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1331 	if (ret < 0)
1332 		return ret;
1333 
1334 	if (!hdcp->is_paired) {
1335 		/* Pairing is required */
1336 		ret = shim->read_2_2_msg(dig_port,
1337 					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1338 					 &msgs.pairing_info,
1339 					 sizeof(msgs.pairing_info));
1340 		if (ret < 0)
1341 			return ret;
1342 
1343 		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1344 		if (ret < 0)
1345 			return ret;
1346 		hdcp->is_paired = true;
1347 	}
1348 
1349 	return 0;
1350 }
1351 
1352 static int hdcp2_locality_check(struct intel_connector *connector)
1353 {
1354 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1355 	struct intel_hdcp *hdcp = &connector->hdcp;
1356 	union {
1357 		struct hdcp2_lc_init lc_init;
1358 		struct hdcp2_lc_send_lprime send_lprime;
1359 	} msgs;
1360 	const struct intel_hdcp_shim *shim = hdcp->shim;
1361 	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1362 
1363 	for (i = 0; i < tries; i++) {
1364 		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1365 		if (ret < 0)
1366 			continue;
1367 
1368 		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1369 				      sizeof(msgs.lc_init));
1370 		if (ret < 0)
1371 			continue;
1372 
1373 		ret = shim->read_2_2_msg(dig_port,
1374 					 HDCP_2_2_LC_SEND_LPRIME,
1375 					 &msgs.send_lprime,
1376 					 sizeof(msgs.send_lprime));
1377 		if (ret < 0)
1378 			continue;
1379 
1380 		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1381 		if (!ret)
1382 			break;
1383 	}
1384 
1385 	return ret;
1386 }
1387 
1388 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1389 {
1390 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1391 	struct intel_hdcp *hdcp = &connector->hdcp;
1392 	struct hdcp2_ske_send_eks send_eks;
1393 	int ret;
1394 
1395 	ret = hdcp2_prepare_skey(connector, &send_eks);
1396 	if (ret < 0)
1397 		return ret;
1398 
1399 	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1400 					sizeof(send_eks));
1401 	if (ret < 0)
1402 		return ret;
1403 
1404 	return 0;
1405 }
1406 
1407 static
1408 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1409 {
1410 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1411 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1412 	struct intel_hdcp *hdcp = &connector->hdcp;
1413 	union {
1414 		struct hdcp2_rep_stream_manage stream_manage;
1415 		struct hdcp2_rep_stream_ready stream_ready;
1416 	} msgs;
1417 	const struct intel_hdcp_shim *shim = hdcp->shim;
1418 	int ret;
1419 
1420 	/* Prepare RepeaterAuth_Stream_Manage msg */
1421 	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1422 	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1423 
1424 	/* K, the number of streams, is fixed at 1. Stored as big-endian. */
1425 	msgs.stream_manage.k = cpu_to_be16(1);
1426 
1427 	/* For HDMI this is forced to 0x0. For DP SST it is also 0x0. */
1428 	msgs.stream_manage.streams[0].stream_id = 0;
1429 	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1430 
1431 	/* Send it to Repeater */
1432 	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1433 				  sizeof(msgs.stream_manage));
1434 	if (ret < 0)
1435 		return ret;
1436 
1437 	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1438 				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1439 	if (ret < 0)
1440 		return ret;
1441 
1442 	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1443 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1444 
1445 	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1446 	if (ret < 0)
1447 		return ret;
1448 
1449 	hdcp->seq_num_m++;
1450 
1451 	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1452 		drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
1453 		return -1;
1454 	}
1455 
1456 	return 0;
1457 }
1458 
1459 static
1460 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1461 {
1462 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1463 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1464 	struct intel_hdcp *hdcp = &connector->hdcp;
1465 	union {
1466 		struct hdcp2_rep_send_receiverid_list recvid_list;
1467 		struct hdcp2_rep_send_ack rep_ack;
1468 	} msgs;
1469 	const struct intel_hdcp_shim *shim = hdcp->shim;
1470 	u32 seq_num_v, device_cnt;
1471 	u8 *rx_info;
1472 	int ret;
1473 
1474 	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1475 				 &msgs.recvid_list, sizeof(msgs.recvid_list));
1476 	if (ret < 0)
1477 		return ret;
1478 
1479 	rx_info = msgs.recvid_list.rx_info;
1480 
1481 	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1482 	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1483 		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
1484 		return -EINVAL;
1485 	}
1486 
1487 	/* Convert the big-endian 24-bit seq_num_v to a CPU-endian u32 */
1488 	seq_num_v =
1489 		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
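	/* e.g. a wire value of { 0x00, 0x01, 0x02 } decodes to 0x000102 */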
1490 
1491 	if (!hdcp->hdcp2_encrypted && seq_num_v) {
1492 		drm_dbg_kms(&dev_priv->drm,
1493 			    "Non zero Seq_num_v at first RecvId_List msg\n");
1494 		return -EINVAL;
1495 	}
1496 
1497 	if (seq_num_v < hdcp->seq_num_v) {
1498 		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
1499 		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
1500 		return -EINVAL;
1501 	}
1502 
1503 	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1504 		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1505 	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1506 					msgs.recvid_list.receiver_ids,
1507 					device_cnt) > 0) {
1508 		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
1509 		return -EPERM;
1510 	}
1511 
1512 	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1513 						    &msgs.recvid_list,
1514 						    &msgs.rep_ack);
1515 	if (ret < 0)
1516 		return ret;
1517 
1518 	hdcp->seq_num_v = seq_num_v;
1519 	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
1520 				  sizeof(msgs.rep_ack));
1521 	if (ret < 0)
1522 		return ret;
1523 
1524 	return 0;
1525 }
1526 
1527 static int hdcp2_authenticate_repeater(struct intel_connector *connector)
1528 {
1529 	int ret;
1530 
1531 	ret = hdcp2_authenticate_repeater_topology(connector);
1532 	if (ret < 0)
1533 		return ret;
1534 
1535 	return hdcp2_propagate_stream_management_info(connector);
1536 }
1537 
1538 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1539 {
1540 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1541 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1542 	struct intel_hdcp *hdcp = &connector->hdcp;
1543 	const struct intel_hdcp_shim *shim = hdcp->shim;
1544 	int ret;
1545 
1546 	ret = hdcp2_authentication_key_exchange(connector);
1547 	if (ret < 0) {
1548 		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1549 		return ret;
1550 	}
1551 
1552 	ret = hdcp2_locality_check(connector);
1553 	if (ret < 0) {
1554 		drm_dbg_kms(&i915->drm,
1555 			    "Locality Check failed. Err : %d\n", ret);
1556 		return ret;
1557 	}
1558 
1559 	ret = hdcp2_session_key_exchange(connector);
1560 	if (ret < 0) {
1561 		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1562 		return ret;
1563 	}
1564 
1565 	if (shim->config_stream_type) {
1566 		ret = shim->config_stream_type(dig_port,
1567 					       hdcp->is_repeater,
1568 					       hdcp->content_type);
1569 		if (ret < 0)
1570 			return ret;
1571 	}
1572 
1573 	if (hdcp->is_repeater) {
1574 		ret = hdcp2_authenticate_repeater(connector);
1575 		if (ret < 0) {
1576 			drm_dbg_kms(&i915->drm,
1577 				    "Repeater Auth Failed. Err: %d\n", ret);
1578 			return ret;
1579 		}
1580 	}
1581 
1582 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1583 	ret = hdcp2_authenticate_port(connector);
1584 	if (ret < 0)
1585 		return ret;
1586 
1587 	return ret;
1588 }
1589 
1590 static int hdcp2_enable_encryption(struct intel_connector *connector)
1591 {
1592 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1593 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1594 	struct intel_hdcp *hdcp = &connector->hdcp;
1595 	enum port port = dig_port->base.port;
1596 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1597 	int ret;
1598 
1599 	drm_WARN_ON(&dev_priv->drm,
1600 		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1601 		    LINK_ENCRYPTION_STATUS);
1602 	if (hdcp->shim->toggle_signalling) {
1603 		ret = hdcp->shim->toggle_signalling(dig_port, true);
1604 		if (ret) {
1605 			drm_err(&dev_priv->drm,
1606 				"Failed to enable HDCP signalling. %d\n",
1607 				ret);
1608 			return ret;
1609 		}
1610 	}
1611 
1612 	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1613 	    LINK_AUTH_STATUS) {
1614 		/* Link is Authenticated. Now set for Encryption */
1615 		intel_de_write(dev_priv,
1616 			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
1617 			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
1618 	}
1619 
1620 	ret = intel_de_wait_for_set(dev_priv,
1621 				    HDCP2_STATUS(dev_priv, cpu_transcoder,
1622 						 port),
1623 				    LINK_ENCRYPTION_STATUS,
1624 				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1625 
1626 	return ret;
1627 }
1628 
1629 static int hdcp2_disable_encryption(struct intel_connector *connector)
1630 {
1631 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1632 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1633 	struct intel_hdcp *hdcp = &connector->hdcp;
1634 	enum port port = dig_port->base.port;
1635 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1636 	int ret;
1637 
1638 	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1639 				      LINK_ENCRYPTION_STATUS));
1640 
1641 	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
1642 		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
1643 
1644 	ret = intel_de_wait_for_clear(dev_priv,
1645 				      HDCP2_STATUS(dev_priv, cpu_transcoder,
1646 						   port),
1647 				      LINK_ENCRYPTION_STATUS,
1648 				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1649 	if (ret == -ETIMEDOUT)
1650 		drm_dbg_kms(&dev_priv->drm, "Disable encryption timed out\n");
1651 
1652 	if (hdcp->shim->toggle_signalling) {
1653 		ret = hdcp->shim->toggle_signalling(dig_port, false);
1654 		if (ret) {
1655 			drm_err(&dev_priv->drm,
1656 				"Failed to disable HDCP signalling. %d\n",
1657 				ret);
1658 			return ret;
1659 		}
1660 	}
1661 
1662 	return ret;
1663 }
1664 
1665 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1666 {
1667 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1668 	int ret, i, tries = 3;
1669 
1670 	for (i = 0; i < tries; i++) {
1671 		ret = hdcp2_authenticate_sink(connector);
1672 		if (!ret)
1673 			break;
1674 
1675 		/* Clearing the mei hdcp session */
1676 		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1677 			    i + 1, tries, ret);
1678 		if (hdcp2_deauthenticate_port(connector) < 0)
1679 			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1680 	}
1681 
1682 	if (i != tries) {
1683 		/*
1684 		 * Ensure the required 200ms minimum time interval between
1685 		 * Session Key Exchange and enabling encryption.
1686 		 */
1687 		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1688 		ret = hdcp2_enable_encryption(connector);
1689 		if (ret < 0) {
1690 			drm_dbg_kms(&i915->drm,
1691 				    "Encryption Enable Failed.(%d)\n", ret);
1692 			if (hdcp2_deauthenticate_port(connector) < 0)
1693 				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1694 		}
1695 	}
1696 
1697 	return ret;
1698 }
1699 
1700 static int _intel_hdcp2_enable(struct intel_connector *connector)
1701 {
1702 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1703 	struct intel_hdcp *hdcp = &connector->hdcp;
1704 	int ret;
1705 
1706 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1707 		    connector->base.name, connector->base.base.id,
1708 		    hdcp->content_type);
1709 
1710 	ret = hdcp2_authenticate_and_encrypt(connector);
1711 	if (ret) {
1712 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
1713 			    hdcp->content_type, ret);
1714 		return ret;
1715 	}
1716 
1717 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1718 		    connector->base.name, connector->base.base.id,
1719 		    hdcp->content_type);
1720 
1721 	hdcp->hdcp2_encrypted = true;
1722 	return 0;
1723 }
1724 
1725 static int _intel_hdcp2_disable(struct intel_connector *connector)
1726 {
1727 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1728 	int ret;
1729 
1730 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1731 		    connector->base.name, connector->base.base.id);
1732 
1733 	ret = hdcp2_disable_encryption(connector);
1734 
1735 	if (hdcp2_deauthenticate_port(connector) < 0)
1736 		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1737 
1738 	connector->hdcp.hdcp2_encrypted = false;
1739 
1740 	return ret;
1741 }
1742 
1743 /* Implements the Link Integrity Check for HDCP2.2 */
1744 static int intel_hdcp2_check_link(struct intel_connector *connector)
1745 {
1746 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1747 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1748 	struct intel_hdcp *hdcp = &connector->hdcp;
1749 	enum port port = dig_port->base.port;
1750 	enum transcoder cpu_transcoder;
1751 	int ret = 0;
1752 
1753 	mutex_lock(&hdcp->mutex);
1754 	cpu_transcoder = hdcp->cpu_transcoder;
1755 
1756 	/* intel_hdcp2_check_link() is expected only when HDCP2.2 is enabled */
1757 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1758 	    !hdcp->hdcp2_encrypted) {
1759 		ret = -EINVAL;
1760 		goto out;
1761 	}
1762 
1763 	if (drm_WARN_ON(&dev_priv->drm,
1764 			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
1765 		drm_err(&dev_priv->drm,
1766 			"HDCP2.2 link stopped the encryption, %x\n",
1767 			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
1768 		ret = -ENXIO;
1769 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1770 		schedule_work(&hdcp->prop_work);
1771 		goto out;
1772 	}
1773 
1774 	ret = hdcp->shim->check_2_2_link(dig_port);
1775 	if (ret == HDCP_LINK_PROTECTED) {
1776 		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1777 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1778 			schedule_work(&hdcp->prop_work);
1779 		}
1780 		goto out;
1781 	}
1782 
1783 	if (ret == HDCP_TOPOLOGY_CHANGE) {
1784 		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1785 			goto out;
1786 
1787 		drm_dbg_kms(&dev_priv->drm,
1788 			    "HDCP2.2 Downstream topology change\n");
1789 		ret = hdcp2_authenticate_repeater_topology(connector);
1790 		if (!ret) {
1791 			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1792 			schedule_work(&hdcp->prop_work);
1793 			goto out;
1794 		}
1795 		drm_dbg_kms(&dev_priv->drm,
1796 			    "[%s:%d] Repeater topology auth failed.(%d)\n",
1797 			    connector->base.name, connector->base.base.id,
1798 			    ret);
1799 	} else {
1800 		drm_dbg_kms(&dev_priv->drm,
1801 			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
1802 			    connector->base.name, connector->base.base.id);
1803 	}
1804 
1805 	ret = _intel_hdcp2_disable(connector);
1806 	if (ret) {
1807 		drm_err(&dev_priv->drm,
1808 			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
1809 			connector->base.name, connector->base.base.id, ret);
1810 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1811 		schedule_work(&hdcp->prop_work);
1812 		goto out;
1813 	}
1814 
1815 	ret = _intel_hdcp2_enable(connector);
1816 	if (ret) {
1817 		drm_dbg_kms(&dev_priv->drm,
1818 			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
1819 			    connector->base.name, connector->base.base.id,
1820 			    ret);
1821 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1822 		schedule_work(&hdcp->prop_work);
1823 		goto out;
1824 	}
1825 
1826 out:
1827 	mutex_unlock(&hdcp->mutex);
1828 	return ret;
1829 }
1830 
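/*
 * Periodic link monitor: try the HDCP2.2 check first, then the HDCP1.4
 * check; whichever succeeds re-arms the work with its own check period.
 * If both fail, monitoring stops until HDCP is re-enabled.
 */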
1831 static void intel_hdcp_check_work(struct work_struct *work)
1832 {
1833 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1834 					       struct intel_hdcp,
1835 					       check_work);
1836 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1837 
1838 	if (!intel_hdcp2_check_link(connector))
1839 		schedule_delayed_work(&hdcp->check_work,
1840 				      DRM_HDCP2_CHECK_PERIOD_MS);
1841 	else if (!intel_hdcp_check_link(connector))
1842 		schedule_delayed_work(&hdcp->check_work,
1843 				      DRM_HDCP_CHECK_PERIOD_MS);
1844 }
1845 
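/*
 * Component callbacks for the MEI HDCP interface: bind caches the ops and
 * device handle published by the mei_hdcp driver under hdcp_comp_mutex;
 * unbind drops them again.
 */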
1846 static int i915_hdcp_component_bind(struct device *i915_kdev,
1847 				    struct device *mei_kdev, void *data)
1848 {
1849 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1850 
1851 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
1852 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1853 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1854 	dev_priv->hdcp_master->mei_dev = mei_kdev;
1855 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1856 
1857 	return 0;
1858 }
1859 
1860 static void i915_hdcp_component_unbind(struct device *i915_kdev,
1861 				       struct device *mei_kdev, void *data)
1862 {
1863 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1864 
1865 	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
1866 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1867 	dev_priv->hdcp_master = NULL;
1868 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1869 }
1870 
1871 static const struct component_ops i915_hdcp_component_ops = {
1872 	.bind   = i915_hdcp_component_bind,
1873 	.unbind = i915_hdcp_component_unbind,
1874 };
1875 
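/*
 * Map an i915 port to the DDI index the ME firmware expects. PORT_B
 * through PORT_F translate with a plain cast, apparently relying on the
 * two enums lining up.
 */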
1876 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
1877 {
1878 	switch (port) {
1879 	case PORT_A:
1880 		return MEI_DDI_A;
1881 	case PORT_B ... PORT_F:
1882 		return (enum mei_fw_ddi)port;
1883 	default:
1884 		return MEI_DDI_INVALID_PORT;
1885 	}
1886 }
1887 
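/*
 * Map a CPU transcoder to the ME firmware transcoder encoding;
 * transcoders A..D translate by setting bit 4 (the OR with 0x10 below).
 */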
1888 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
1889 {
1890 	switch (cpu_transcoder) {
1891 	case TRANSCODER_A ... TRANSCODER_D:
1892 		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
1893 	default: /* eDP and DSI transcoders are not HDCP capable */
1894 		return MEI_INVALID_TRANSCODER;
1895 	}
1896 }
1897 
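/*
 * Fill in the hdcp_port_data block consumed by the MEI firmware
 * interface: the firmware DDI index (pre-Gen12 only), port type, shim
 * protocol, and a single stream entry carrying the requested content
 * type. fw_tc stays invalid here and is filled in at modeset time on
 * Gen 12+.
 */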
1898 static int initialize_hdcp_port_data(struct intel_connector *connector,
1899 				     const struct intel_hdcp_shim *shim)
1900 {
1901 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1902 	struct intel_hdcp *hdcp = &connector->hdcp;
1903 	struct hdcp_port_data *data = &hdcp->port_data;
1904 
1905 	if (INTEL_GEN(dev_priv) < 12)
1906 		data->fw_ddi =
1907 			intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
1908 	else
1909 		/*
1910 		 * As per the ME FW API expectation, for Gen 12+ fw_ddi is
1911 		 * filled with zero (the invalid port index).
1912 		 */
1913 		data->fw_ddi = MEI_DDI_INVALID_PORT;
1914 
1915 	/*
1916 	 * The associated transcoder is set and modified at modeset time, so
1917 	 * fw_tc is initialized here to zero (the invalid transcoder index).
1918 	 * On platforms older than Gen 12 it retains that value forever.
1919 	 */
1920 	data->fw_tc = MEI_INVALID_TRANSCODER;
1921 
1922 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
1923 	data->protocol = (u8)shim->protocol;
1924 
1925 	data->k = 1;
1926 	if (!data->streams)
1927 		data->streams = kcalloc(data->k,
1928 					sizeof(struct hdcp2_streamid_type),
1929 					GFP_KERNEL);
1930 	if (!data->streams) {
1931 		drm_err(&dev_priv->drm, "Out of Memory\n");
1932 		return -ENOMEM;
1933 	}
1934 
1935 	data->streams[0].stream_id = 0;
1936 	data->streams[0].stream_type = hdcp->content_type;
1937 
1938 	return 0;
1939 }
1940 
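/*
 * HDCP2.2 needs the MEI HDCP driver (CONFIG_INTEL_MEI_HDCP) plus a
 * Gen 10+ platform or one of GLK/KBL/CFL/CML.
 */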
1941 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1942 {
1943 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1944 		return false;
1945 
1946 	return (INTEL_GEN(dev_priv) >= 10 ||
1947 		IS_GEMINILAKE(dev_priv) ||
1948 		IS_KABYLAKE(dev_priv) ||
1949 		IS_COFFEELAKE(dev_priv) ||
1950 		IS_COMETLAKE(dev_priv));
1951 }
1952 
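/*
 * Register i915 with the component framework so it can be matched with
 * the mei_hdcp driver. hdcp_comp_added is set before component_add_typed()
 * and rolled back on failure, presumably because the bind callback can
 * run as soon as the component is added.
 */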
1953 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
1954 {
1955 	int ret;
1956 
1957 	if (!is_hdcp2_supported(dev_priv))
1958 		return;
1959 
1960 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1961 	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
1962 
1963 	dev_priv->hdcp_comp_added = true;
1964 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1965 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
1966 				  I915_COMPONENT_HDCP);
1967 	if (ret < 0) {
1968 		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
1969 			    ret);
1970 		mutex_lock(&dev_priv->hdcp_comp_mutex);
1971 		dev_priv->hdcp_comp_added = false;
1972 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1973 		return;
1974 	}
1975 }
1976 
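/*
 * Advertise HDCP2.2 on this connector only if the MEI port data could be
 * initialized for it.
 */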
1977 static void intel_hdcp2_init(struct intel_connector *connector,
1978 			     const struct intel_hdcp_shim *shim)
1979 {
1980 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1981 	struct intel_hdcp *hdcp = &connector->hdcp;
1982 	int ret;
1983 
1984 	ret = initialize_hdcp_port_data(connector, shim);
1985 	if (ret) {
1986 		drm_dbg_kms(&i915->drm, "MEI HDCP data init failed\n");
1987 		return;
1988 	}
1989 
1990 	hdcp->hdcp2_supported = true;
1991 }
1992 
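/*
 * Per-connector HDCP setup: probe HDCP2.2 support first, then attach the
 * content protection property (advertising the HDCP Content Type property
 * only when 2.2 is supported) and initialize the lock, workers and
 * waitqueue.
 */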
1993 int intel_hdcp_init(struct intel_connector *connector,
1994 		    const struct intel_hdcp_shim *shim)
1995 {
1996 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1997 	struct intel_hdcp *hdcp = &connector->hdcp;
1998 	int ret;
1999 
2000 	if (!shim)
2001 		return -EINVAL;
2002 
2003 	if (is_hdcp2_supported(dev_priv))
2004 		intel_hdcp2_init(connector, shim);
2005 
2006 	ret =
2007 	drm_connector_attach_content_protection_property(&connector->base,
2008 							 hdcp->hdcp2_supported);
2009 	if (ret) {
2010 		hdcp->hdcp2_supported = false;
2011 		kfree(hdcp->port_data.streams);
2012 		return ret;
2013 	}
2014 
2015 	hdcp->shim = shim;
2016 	mutex_init(&hdcp->mutex);
2017 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2018 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2019 	init_waitqueue_head(&hdcp->cp_irq_queue);
2020 
2021 	return 0;
2022 }
2023 
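/*
 * Enable content protection on a connector, preferring HDCP2.2 and
 * falling back to HDCP1.4 when the content type allows it. On success the
 * link check worker is armed and the property moves to ENABLED. Callers
 * pass the transcoder and type from the committed state, as
 * intel_hdcp_update_pipe() below does:
 *
 *	intel_hdcp_enable(connector, crtc_state->cpu_transcoder,
 *			  (u8)conn_state->hdcp_content_type);
 */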
2024 int intel_hdcp_enable(struct intel_connector *connector,
2025 		      enum transcoder cpu_transcoder, u8 content_type)
2026 {
2027 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2028 	struct intel_hdcp *hdcp = &connector->hdcp;
2029 	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2030 	int ret = -EINVAL;
2031 
2032 	if (!hdcp->shim)
2033 		return -ENOENT;
2034 
2035 	mutex_lock(&hdcp->mutex);
2036 	drm_WARN_ON(&dev_priv->drm,
2037 		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2038 	hdcp->content_type = content_type;
2039 
2040 	if (INTEL_GEN(dev_priv) >= 12) {
2041 		hdcp->cpu_transcoder = cpu_transcoder;
2042 		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
2043 	}
2044 
2045 	/*
2046 	 * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable
2047 	 * of HDCP2.2, prefer it.
2048 	 */
2049 	if (intel_hdcp2_capable(connector)) {
2050 		ret = _intel_hdcp2_enable(connector);
2051 		if (!ret)
2052 			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
2053 	}
2054 
2055 	/*
2056 	 * If HDCP2.2 fails and the requested content type is not Type 1,
2057 	 * fall back to HDCP1.4.
2058 	 */
2059 	if (ret && intel_hdcp_capable(connector) &&
2060 	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2061 		ret = _intel_hdcp_enable(connector);
2062 	}
2063 
2064 	if (!ret) {
2065 		schedule_delayed_work(&hdcp->check_work, check_link_interval);
2066 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
2067 		schedule_work(&hdcp->prop_work);
2068 	}
2069 
2070 	mutex_unlock(&hdcp->mutex);
2071 	return ret;
2072 }
2073 
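/*
 * Tear down whichever HDCP version is currently encrypting the link and
 * mark the property UNDESIRED. The check worker is cancelled only after
 * dropping hdcp->mutex, since the worker itself takes that mutex.
 */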
2074 int intel_hdcp_disable(struct intel_connector *connector)
2075 {
2076 	struct intel_hdcp *hdcp = &connector->hdcp;
2077 	int ret = 0;
2078 
2079 	if (!hdcp->shim)
2080 		return -ENOENT;
2081 
2082 	mutex_lock(&hdcp->mutex);
2083 
2084 	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2085 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
2086 		if (hdcp->hdcp2_encrypted)
2087 			ret = _intel_hdcp2_disable(connector);
2088 		else if (hdcp->hdcp_encrypted)
2089 			ret = _intel_hdcp_disable(connector);
2090 	}
2091 
2092 	mutex_unlock(&hdcp->mutex);
2093 	cancel_delayed_work_sync(&hdcp->check_work);
2094 	return ret;
2095 }
2096 
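/*
 * Atomic commit hook: disable HDCP when userspace sets UNDESIRED or
 * changes the content type, re-mark the state DESIRED after a type
 * change, and (re)enable HDCP whenever it is desired but not yet enabled.
 */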
2097 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2098 			    struct intel_encoder *encoder,
2099 			    const struct intel_crtc_state *crtc_state,
2100 			    const struct drm_connector_state *conn_state)
2101 {
2102 	struct intel_connector *connector =
2103 				to_intel_connector(conn_state->connector);
2104 	struct intel_hdcp *hdcp = &connector->hdcp;
2105 	bool content_protection_type_changed =
2106 		(conn_state->hdcp_content_type != hdcp->content_type &&
2107 		 conn_state->content_protection !=
2108 		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2109 	bool desired_and_not_enabled = false;
2110 
2111 	/*
2112 	 * If a content type change is requested during an active HDCP
2113 	 * session, disable HDCP and re-enable it with the new type value.
2114 	 */
2115 	if (conn_state->content_protection ==
2116 	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2117 	    content_protection_type_changed)
2118 		intel_hdcp_disable(connector);
2119 
2120 	/*
2121 	 * Mark the HDCP state as DESIRED after the disable done as part of
2122 	 * the type change procedure.
2123 	 */
2124 	if (content_protection_type_changed) {
2125 		mutex_lock(&hdcp->mutex);
2126 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2127 		schedule_work(&hdcp->prop_work);
2128 		mutex_unlock(&hdcp->mutex);
2129 	}
2130 
2131 	if (conn_state->content_protection ==
2132 	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2133 		mutex_lock(&hdcp->mutex);
2134 		/* Avoid enabling HDCP if it is already ENABLED */
2135 		desired_and_not_enabled =
2136 			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2137 		mutex_unlock(&hdcp->mutex);
2138 	}
2139 
2140 	if (desired_and_not_enabled || content_protection_type_changed)
2141 		intel_hdcp_enable(connector,
2142 				  crtc_state->cpu_transcoder,
2143 				  (u8)conn_state->hdcp_content_type);
2144 }
2145 
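/*
 * Deregister the HDCP component; safe to call even when
 * intel_hdcp_component_init() never added it.
 */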
2146 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2147 {
2148 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2149 	if (!dev_priv->hdcp_comp_added) {
2150 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2151 		return;
2152 	}
2153 
2154 	dev_priv->hdcp_comp_added = false;
2155 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2156 
2157 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2158 }
2159 
2160 void intel_hdcp_cleanup(struct intel_connector *connector)
2161 {
2162 	if (!connector->hdcp.shim)
2163 		return;
2164 
2165 	mutex_lock(&connector->hdcp.mutex);
2166 	kfree(connector->hdcp.port_data.streams);
2167 	mutex_unlock(&connector->hdcp.mutex);
2168 }
2169 
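/*
 * Atomic check hook: keep the content protection uapi state consistent
 * across modesets and connector disables, and force a modeset (via
 * mode_changed) only when a property change actually requires
 * re-authentication.
 */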
2170 void intel_hdcp_atomic_check(struct drm_connector *connector,
2171 			     struct drm_connector_state *old_state,
2172 			     struct drm_connector_state *new_state)
2173 {
2174 	u64 old_cp = old_state->content_protection;
2175 	u64 new_cp = new_state->content_protection;
2176 	struct drm_crtc_state *crtc_state;
2177 
2178 	if (!new_state->crtc) {
2179 		/*
2180 		 * If the connector is being disabled with CP enabled, mark it
2181 		 * desired so it's re-enabled when the connector is brought back
2182 		 */
2183 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2184 			new_state->content_protection =
2185 				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2186 		return;
2187 	}
2188 
2189 	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2190 						   new_state->crtc);
2191 	/*
2192 	 * Fix the HDCP uapi content protection state in case of modeset.
2193 	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
2194 	 * needs to be sent if there is a transition from ENABLED->DESIRED.
2195 	 */
2196 	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2197 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2198 	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2199 		new_state->content_protection =
2200 			DRM_MODE_CONTENT_PROTECTION_DESIRED;
2201 
2202 	/*
2203 	 * Nothing to do if the state didn't change, or if HDCP was activated
2204 	 * since the last commit, as long as the content type is unchanged.
2205 	 */
2206 	if (old_cp == new_cp ||
2207 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2208 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2209 		if (old_state->hdcp_content_type ==
2210 				new_state->hdcp_content_type)
2211 			return;
2212 	}
2213 
2214 	crtc_state->mode_changed = true;
2215 }
2216 
2217 /* Handles the CP_IRQ raised from the DP HDCP sink */
2218 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2219 {
2220 	struct intel_hdcp *hdcp = &connector->hdcp;
2221 
2222 	if (!hdcp->shim)
2223 		return;
2224 
2225 	atomic_inc(&connector->hdcp.cp_irq_count);
2226 	wake_up_all(&connector->hdcp.cp_irq_queue);
2227 
2228 	schedule_delayed_work(&hdcp->check_work, 0);
2229 }
2230