1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_reg.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_hdcp.h"
22 #include "intel_sideband.h"
23 #include "intel_connector.h"
24 
25 #define KEY_LOAD_TRIES	5
26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS	50
27 #define HDCP2_LC_RETRY_CNT			3
28 
29 static
30 bool intel_hdcp_is_ksv_valid(u8 *ksv)
31 {
32 	int i, ones = 0;
33 	/* KSV has 20 1's and 20 0's */
34 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
35 		ones += hweight8(ksv[i]);
36 	if (ones != 20)
37 		return false;
38 
39 	return true;
40 }
41 
42 static
43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
44 			       const struct intel_hdcp_shim *shim, u8 *bksv)
45 {
46 	struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
47 	int ret, i, tries = 2;
48 
49 	/* HDCP spec states that we must retry the bksv if it is invalid */
50 	for (i = 0; i < tries; i++) {
51 		ret = shim->read_bksv(intel_dig_port, bksv);
52 		if (ret)
53 			return ret;
54 		if (intel_hdcp_is_ksv_valid(bksv))
55 			break;
56 	}
57 	if (i == tries) {
58 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
59 		return -ENODEV;
60 	}
61 
62 	return 0;
63 }
64 
65 /* Is HDCP1.4 capable on Platform and Sink */
66 bool intel_hdcp_capable(struct intel_connector *connector)
67 {
68 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
69 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
70 	bool capable = false;
71 	u8 bksv[5];
72 
73 	if (!shim)
74 		return capable;
75 
76 	if (shim->hdcp_capable) {
77 		shim->hdcp_capable(intel_dig_port, &capable);
78 	} else {
79 		if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
80 			capable = true;
81 	}
82 
83 	return capable;
84 }
85 
86 /* Is HDCP2.2 capable on Platform and Sink */
87 bool intel_hdcp2_capable(struct intel_connector *connector)
88 {
89 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
90 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
91 	struct intel_hdcp *hdcp = &connector->hdcp;
92 	bool capable = false;
93 
94 	/* I915 support for HDCP2.2 */
95 	if (!hdcp->hdcp2_supported)
96 		return false;
97 
98 	/* MEI interface is solid */
99 	mutex_lock(&dev_priv->hdcp_comp_mutex);
100 	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
101 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
102 		return false;
103 	}
104 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
105 
106 	/* Sink's capability for HDCP2.2 */
107 	hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
108 
109 	return capable;
110 }
111 
112 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
113 			      enum transcoder cpu_transcoder, enum port port)
114 {
115 	return intel_de_read(dev_priv,
116 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
117 	       HDCP_STATUS_ENC;
118 }
119 
120 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
121 			       enum transcoder cpu_transcoder, enum port port)
122 {
123 	return intel_de_read(dev_priv,
124 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
125 	       LINK_ENCRYPTION_STATUS;
126 }
127 
/*
 * Poll the sink until its downstream KSV list is ready.
 *
 * Returns 0 when ready, the shim's read error if a poll iteration failed,
 * or -ETIMEDOUT after the 5 second budget the HDCP spec allows.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* The wait also terminates on a read error; propagate it. */
	if (read_ret)
		return read_ret;
	/* Defensive: the wait succeeded, so ksv_ready should be set. */
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
148 
149 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
150 {
151 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
152 	struct i915_power_well *power_well;
153 	enum i915_power_well_id id;
154 	bool enabled = false;
155 
156 	/*
157 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
158 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
159 	 */
160 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
161 		id = HSW_DISP_PW_GLOBAL;
162 	else
163 		id = SKL_DISP_PW_1;
164 
165 	mutex_lock(&power_domains->lock);
166 
167 	/* PG1 (power well #1) needs to be enabled */
168 	for_each_power_well(dev_priv, power_well) {
169 		if (power_well->desc->id == id) {
170 			enabled = power_well->desc->ops->is_enabled(dev_priv,
171 								    power_well);
172 			break;
173 		}
174 	}
175 	mutex_unlock(&power_domains->lock);
176 
177 	/*
178 	 * Another req for hdcp key loadability is enabled state of pll for
179 	 * cdclk. Without active crtc we wont land here. So we are assuming that
180 	 * cdclk is already on.
181 	 */
182 
183 	return enabled;
184 }
185 
186 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
187 {
188 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
189 	intel_de_write(dev_priv, HDCP_KEY_STATUS,
190 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
191 }
192 
/*
 * Ensure the HDCP1.4 key is loaded and valid in HW, triggering the load
 * if necessary. Returns 0 on success, -ENXIO when the key cannot become
 * usable, or a pcode/wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the key is already loaded and validated. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load completed but the key failed validation. */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
244 
245 /* Returns updated SHA-1 index */
246 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
247 {
248 	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
249 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
250 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
251 		return -ETIMEDOUT;
252 	}
253 	return 0;
254 }
255 
/*
 * Select the HDCP_REP_CTL bits (repeater-present + SHA1 M0 source) for the
 * given transcoder (gen12+) or DDI port (pre-gen12).
 *
 * NOTE(review): the default cases return -EINVAL through a u32 return type,
 * so a caller would see a large bitmask rather than a negative error —
 * confirm callers can never reach an unknown transcoder/port here.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Gen12 moved HDCP from per-DDI to per-transcoder registers. */
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
297 
/*
 * Validate the repeater's V' against a HW-computed SHA-1.
 *
 * Feeds the downstream KSV fifo, BINFO/BSTATUS and M0 (supplied internally
 * by HW via HDCP_REP_CTL) through the HW SHA-1 engine and compares the
 * result with the V' parts read from the receiver.
 *
 * Returns 0 on match; a shim read error, -ETIMEDOUT (HW handshake),
 * -EINVAL (bookkeeping went wrong) or -ENXIO (hash mismatch) otherwise.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = intel_dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		/* KSVs are 5 bytes, so leftovers can only be 0..3. */
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
526 
/*
 * Implements Part 2 of the HDCP authorization procedure: repeater
 * authentication. Waits for the downstream KSV list, rejects topologies
 * that exceed the spec limits or contain revoked KSVs, then validates V'
 * (re-reading it up to 3 times on mismatch, per the DP spec).
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(intel_dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies beyond the HDCP1.4 device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Any revoked KSV in the downstream list fails the whole topology. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream)) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	/* ret still holds the last validation error here. */
	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
610 
/*
 * Implements Part 1 of the HDCP authorization procedure: generate An,
 * exchange An/Aksv/Bksv with the sink, enable authentication/encryption
 * and verify R0/R0'. Chains into Part 2 when a repeater is present.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = intel_dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let the same bytes be viewed as registers or shim buffers. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read back the HW-generated An and hand it to the sink with Aksv. */
	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* The sink starts computing R0' once Aksv is written; mark t0. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* A revoked Bksv fails authentication outright. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	/* Kick off HW authentication + encryption. */
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
776 
/*
 * Tear down HDCP1.4 encryption: clear HDCP_CONF, wait for the status
 * register to go to zero, then stop HDCP signalling via the shim.
 * NOTE(review): appears to rely on hdcp->mutex being held by the caller
 * (see intel_hdcp_check_link) — confirm for all call sites.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	/* All status bits must clear for the disable to count as done. */
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
808 
/*
 * Enable HDCP1.4: load the keys (retrying up to KEY_LOAD_TRIES, clearing
 * them between attempts) and run authentication (retrying up to 3 times,
 * as the spec expects reauth on failure). Sets hdcp->hdcp_encrypted on
 * success.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		/* Clear the stale key before the next load attempt. */
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
853 
/* Map an embedded intel_hdcp back to its enclosing intel_connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
858 
/*
 * Implements Part 3 of the HDCP authorization procedure: link integrity
 * check. Verifies encryption is still live in HW and at the sink; on
 * failure, disables and re-enables HDCP, updating the content-protection
 * property (via prop_work) as the state changes. Takes hdcp->mutex.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW should still report encryption enabled at this point. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	/* Sink-side link check: non-zero means the link is still good. */
	if (hdcp->shim->check_link(intel_dig_port)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
923 
/*
 * Deferred worker that pushes hdcp->value into the connector's
 * content-protection property. Runs out of line because the property
 * update needs connection_mutex, which can't be taken from the contexts
 * that schedule this work.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	/* Lock order: connection_mutex first, then hdcp->mutex. */
	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}
946 
947 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
948 {
949 	return INTEL_INFO(dev_priv)->display.has_hdcp &&
950 			(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
951 }
952 
953 static int
954 hdcp2_prepare_ake_init(struct intel_connector *connector,
955 		       struct hdcp2_ake_init *ake_data)
956 {
957 	struct hdcp_port_data *data = &connector->hdcp.port_data;
958 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
959 	struct i915_hdcp_comp_master *comp;
960 	int ret;
961 
962 	mutex_lock(&dev_priv->hdcp_comp_mutex);
963 	comp = dev_priv->hdcp_master;
964 
965 	if (!comp || !comp->ops) {
966 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
967 		return -EINVAL;
968 	}
969 
970 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
971 	if (ret)
972 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
973 			    ret);
974 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
975 
976 	return ret;
977 }
978 
979 static int
980 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
981 				struct hdcp2_ake_send_cert *rx_cert,
982 				bool *paired,
983 				struct hdcp2_ake_no_stored_km *ek_pub_km,
984 				size_t *msg_sz)
985 {
986 	struct hdcp_port_data *data = &connector->hdcp.port_data;
987 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
988 	struct i915_hdcp_comp_master *comp;
989 	int ret;
990 
991 	mutex_lock(&dev_priv->hdcp_comp_mutex);
992 	comp = dev_priv->hdcp_master;
993 
994 	if (!comp || !comp->ops) {
995 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
996 		return -EINVAL;
997 	}
998 
999 	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1000 							 rx_cert, paired,
1001 							 ek_pub_km, msg_sz);
1002 	if (ret < 0)
1003 		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1004 			    ret);
1005 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1006 
1007 	return ret;
1008 }
1009 
1010 static int hdcp2_verify_hprime(struct intel_connector *connector,
1011 			       struct hdcp2_ake_send_hprime *rx_hprime)
1012 {
1013 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1014 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1015 	struct i915_hdcp_comp_master *comp;
1016 	int ret;
1017 
1018 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1019 	comp = dev_priv->hdcp_master;
1020 
1021 	if (!comp || !comp->ops) {
1022 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1023 		return -EINVAL;
1024 	}
1025 
1026 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1027 	if (ret < 0)
1028 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1029 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1030 
1031 	return ret;
1032 }
1033 
1034 static int
1035 hdcp2_store_pairing_info(struct intel_connector *connector,
1036 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1037 {
1038 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1039 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1040 	struct i915_hdcp_comp_master *comp;
1041 	int ret;
1042 
1043 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1044 	comp = dev_priv->hdcp_master;
1045 
1046 	if (!comp || !comp->ops) {
1047 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1048 		return -EINVAL;
1049 	}
1050 
1051 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1052 	if (ret < 0)
1053 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1054 			    ret);
1055 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1056 
1057 	return ret;
1058 }
1059 
1060 static int
1061 hdcp2_prepare_lc_init(struct intel_connector *connector,
1062 		      struct hdcp2_lc_init *lc_init)
1063 {
1064 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1065 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1066 	struct i915_hdcp_comp_master *comp;
1067 	int ret;
1068 
1069 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1070 	comp = dev_priv->hdcp_master;
1071 
1072 	if (!comp || !comp->ops) {
1073 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1074 		return -EINVAL;
1075 	}
1076 
1077 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1078 	if (ret < 0)
1079 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1080 			    ret);
1081 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1082 
1083 	return ret;
1084 }
1085 
1086 static int
1087 hdcp2_verify_lprime(struct intel_connector *connector,
1088 		    struct hdcp2_lc_send_lprime *rx_lprime)
1089 {
1090 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1091 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1092 	struct i915_hdcp_comp_master *comp;
1093 	int ret;
1094 
1095 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1096 	comp = dev_priv->hdcp_master;
1097 
1098 	if (!comp || !comp->ops) {
1099 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1100 		return -EINVAL;
1101 	}
1102 
1103 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1104 	if (ret < 0)
1105 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1106 			    ret);
1107 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1108 
1109 	return ret;
1110 }
1111 
1112 static int hdcp2_prepare_skey(struct intel_connector *connector,
1113 			      struct hdcp2_ske_send_eks *ske_data)
1114 {
1115 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1116 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1117 	struct i915_hdcp_comp_master *comp;
1118 	int ret;
1119 
1120 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1121 	comp = dev_priv->hdcp_master;
1122 
1123 	if (!comp || !comp->ops) {
1124 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1125 		return -EINVAL;
1126 	}
1127 
1128 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1129 	if (ret < 0)
1130 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1131 			    ret);
1132 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1133 
1134 	return ret;
1135 }
1136 
1137 static int
1138 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1139 				      struct hdcp2_rep_send_receiverid_list
1140 								*rep_topology,
1141 				      struct hdcp2_rep_send_ack *rep_send_ack)
1142 {
1143 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1144 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1145 	struct i915_hdcp_comp_master *comp;
1146 	int ret;
1147 
1148 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1149 	comp = dev_priv->hdcp_master;
1150 
1151 	if (!comp || !comp->ops) {
1152 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1153 		return -EINVAL;
1154 	}
1155 
1156 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1157 							 rep_topology,
1158 							 rep_send_ack);
1159 	if (ret < 0)
1160 		drm_dbg_kms(&dev_priv->drm,
1161 			    "Verify rep topology failed. %d\n", ret);
1162 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1163 
1164 	return ret;
1165 }
1166 
1167 static int
1168 hdcp2_verify_mprime(struct intel_connector *connector,
1169 		    struct hdcp2_rep_stream_ready *stream_ready)
1170 {
1171 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1172 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1173 	struct i915_hdcp_comp_master *comp;
1174 	int ret;
1175 
1176 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1177 	comp = dev_priv->hdcp_master;
1178 
1179 	if (!comp || !comp->ops) {
1180 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1181 		return -EINVAL;
1182 	}
1183 
1184 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1185 	if (ret < 0)
1186 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1187 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1188 
1189 	return ret;
1190 }
1191 
1192 static int hdcp2_authenticate_port(struct intel_connector *connector)
1193 {
1194 	struct hdcp_port_data *data = &connector->hdcp.port_data;
1195 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1196 	struct i915_hdcp_comp_master *comp;
1197 	int ret;
1198 
1199 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1200 	comp = dev_priv->hdcp_master;
1201 
1202 	if (!comp || !comp->ops) {
1203 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1204 		return -EINVAL;
1205 	}
1206 
1207 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1208 	if (ret < 0)
1209 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1210 			    ret);
1211 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1212 
1213 	return ret;
1214 }
1215 
1216 static int hdcp2_close_mei_session(struct intel_connector *connector)
1217 {
1218 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1219 	struct i915_hdcp_comp_master *comp;
1220 	int ret;
1221 
1222 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1223 	comp = dev_priv->hdcp_master;
1224 
1225 	if (!comp || !comp->ops) {
1226 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1227 		return -EINVAL;
1228 	}
1229 
1230 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1231 					     &connector->hdcp.port_data);
1232 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1233 
1234 	return ret;
1235 }
1236 
/*
 * Deauthenticating a port is simply closing its mei session: the ME FW
 * drops the session state and the port has to re-run the full
 * authentication to get HDCP2.2 back.
 */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1241 
/*
 * Authentication flow starts from here.
 *
 * HDCP2.2 step 1, AKE (Authentication and Key Exchange): exchange the
 * AKE messages with the sink while the ME FW verifies the receiver
 * certificate and prepares km. On the first authentication with a sink
 * the pairing info it sends back is stored via the ME FW, so later
 * authentications can take the stored-km path.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* The AKE messages are exchanged one at a time, so share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* Get AKE_Init from the ME FW and send it to the sink. */
	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	/* The sink must declare HDCP2.2 support in its RxCaps VERSION byte. */
	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Refuse sinks whose receiver ID is on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1)) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * The ME FW writes either a no-stored-km or a (smaller) stored-km
	 * message into msgs.no_stored_km; the actual size comes back in
	 * 'size' and is what must be sent to the sink.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1331 
/*
 * HDCP2.2 step 2: Locality Check. Send LC_Init to the sink and have the
 * ME FW verify the L' it returns. A failure at any point of the sequence
 * consumes one of the HDCP2_LC_RETRY_CNT attempts ('continue' restarts
 * the whole exchange); the last attempt's error code is what gets
 * returned on overall failure.
 */
static int hdcp2_locality_check(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_lc_init lc_init;
		struct hdcp2_lc_send_lprime send_lprime;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int tries = HDCP2_LC_RETRY_CNT, ret, i;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
		if (ret < 0)
			continue;

		ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
				      sizeof(msgs.lc_init));
		if (ret < 0)
			continue;

		ret = shim->read_2_2_msg(intel_dig_port,
					 HDCP_2_2_LC_SEND_LPRIME,
					 &msgs.send_lprime,
					 sizeof(msgs.send_lprime));
		if (ret < 0)
			continue;

		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
		if (!ret)
			break;
	}

	return ret;
}
1367 
1368 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1369 {
1370 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
1371 	struct intel_hdcp *hdcp = &connector->hdcp;
1372 	struct hdcp2_ske_send_eks send_eks;
1373 	int ret;
1374 
1375 	ret = hdcp2_prepare_skey(connector, &send_eks);
1376 	if (ret < 0)
1377 		return ret;
1378 
1379 	ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
1380 					sizeof(send_eks));
1381 	if (ret < 0)
1382 		return ret;
1383 
1384 	return 0;
1385 }
1386 
1387 static
1388 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1389 {
1390 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
1391 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1392 	struct intel_hdcp *hdcp = &connector->hdcp;
1393 	union {
1394 		struct hdcp2_rep_stream_manage stream_manage;
1395 		struct hdcp2_rep_stream_ready stream_ready;
1396 	} msgs;
1397 	const struct intel_hdcp_shim *shim = hdcp->shim;
1398 	int ret;
1399 
1400 	/* Prepare RepeaterAuth_Stream_Manage msg */
1401 	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1402 	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1403 
1404 	/* K no of streams is fixed as 1. Stored as big-endian. */
1405 	msgs.stream_manage.k = cpu_to_be16(1);
1406 
1407 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1408 	msgs.stream_manage.streams[0].stream_id = 0;
1409 	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1410 
1411 	/* Send it to Repeater */
1412 	ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
1413 				  sizeof(msgs.stream_manage));
1414 	if (ret < 0)
1415 		return ret;
1416 
1417 	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
1418 				 &msgs.stream_ready, sizeof(msgs.stream_ready));
1419 	if (ret < 0)
1420 		return ret;
1421 
1422 	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1423 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1424 
1425 	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1426 	if (ret < 0)
1427 		return ret;
1428 
1429 	hdcp->seq_num_m++;
1430 
1431 	if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1432 		drm_dbg_kms(&i915->drm, "seq_num_m roll over.\n");
1433 		return -1;
1434 	}
1435 
1436 	return 0;
1437 }
1438 
/*
 * Repeater topology authentication: read the repeater's
 * RepeaterAuth_Send_ReceiverID_List, sanity check it (depth/device
 * limits, seq_num_v monotonicity, SRM revocation), let the ME FW verify
 * it and prepare the ack, then send RepeaterAuth_Send_Ack back.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Reject topologies beyond the HDCP2.2 cascade/device limits. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* The very first list after enabling encryption must carry zero. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt)) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only accept the new seq_num_v once the ME FW verified the list. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1506 
/*
 * Repeater authentication: validate the downstream topology first, then
 * propagate the stream management information.
 */
static int hdcp2_authenticate_repeater(struct intel_connector *connector)
{
	int ret = hdcp2_authenticate_repeater_topology(connector);

	if (ret < 0)
		return ret;

	return hdcp2_propagate_stream_management_info(connector);
}
1517 
1518 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1519 {
1520 	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
1521 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1522 	struct intel_hdcp *hdcp = &connector->hdcp;
1523 	const struct intel_hdcp_shim *shim = hdcp->shim;
1524 	int ret;
1525 
1526 	ret = hdcp2_authentication_key_exchange(connector);
1527 	if (ret < 0) {
1528 		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1529 		return ret;
1530 	}
1531 
1532 	ret = hdcp2_locality_check(connector);
1533 	if (ret < 0) {
1534 		drm_dbg_kms(&i915->drm,
1535 			    "Locality Check failed. Err : %d\n", ret);
1536 		return ret;
1537 	}
1538 
1539 	ret = hdcp2_session_key_exchange(connector);
1540 	if (ret < 0) {
1541 		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1542 		return ret;
1543 	}
1544 
1545 	if (shim->config_stream_type) {
1546 		ret = shim->config_stream_type(intel_dig_port,
1547 					       hdcp->is_repeater,
1548 					       hdcp->content_type);
1549 		if (ret < 0)
1550 			return ret;
1551 	}
1552 
1553 	if (hdcp->is_repeater) {
1554 		ret = hdcp2_authenticate_repeater(connector);
1555 		if (ret < 0) {
1556 			drm_dbg_kms(&i915->drm,
1557 				    "Repeater Auth Failed. Err: %d\n", ret);
1558 			return ret;
1559 		}
1560 	}
1561 
1562 	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1563 	ret = hdcp2_authenticate_port(connector);
1564 	if (ret < 0)
1565 		return ret;
1566 
1567 	return ret;
1568 }
1569 
/*
 * Enable HDCP2.2 link encryption on an authenticated port: toggle HDCP
 * signalling on (where the shim requires it), request link encryption
 * via HDCP2_CTL and wait for the hardware to report it active.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be active on this transcoder/port. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	/* Wait for the HW to report link encryption as enabled. */
	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1608 
/*
 * Disable HDCP2.2 link encryption: drop the encryption request in
 * HDCP2_CTL, wait for the hardware to confirm, then toggle HDCP
 * signalling off (where the shim requires it). Note that a signalling
 * failure overrides a wait timeout in the returned error code.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be active when this is called. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	/* Turn signalling off even if the encryption disable timed out. */
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1644 
/*
 * Attempt the full HDCP2.2 authentication up to three times, closing
 * the mei session between failed attempts, and enable link encryption
 * once an attempt succeeds.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret)
			break;

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	/* i == tries means all attempts failed; ret holds the last error. */
	if (i != tries) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	return ret;
}
1679 
/*
 * Enable HDCP2.2 on the connector (authenticate + encrypt) and mark it
 * as encrypted on success. Caller is expected to hold hdcp->mutex.
 */
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	ret = hdcp2_authenticate_and_encrypt(connector);
	if (ret) {
		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
			    hdcp->content_type, ret);
		return ret;
	}

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
		    connector->base.name, connector->base.base.id,
		    hdcp->content_type);

	hdcp->hdcp2_encrypted = true;
	return 0;
}
1704 
/*
 * Disable HDCP2.2 on the connector: stop link encryption, close the mei
 * session (best effort, only logged on failure) and clear the encrypted
 * flag. Returns the encryption-disable result.
 */
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	connector->hdcp.hdcp2_encrypted = false;

	return ret;
}
1722 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Called from the periodic check worker. Verifies that the hardware
 * still reports encryption, then asks the shim about the link state:
 * a protected link just refreshes the property, a topology change
 * re-runs only the repeater topology authentication, and any other
 * failure triggers a full disable + re-enable cycle.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = intel_dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW dropped encryption behind our back: flag DESIRED and bail. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(intel_dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Re-auth only the topology; fall through to full re-auth on failure. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&hdcp->prop_work);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full recovery: disable, then re-enable HDCP2.2 from scratch. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1810 
/*
 * Periodic link check worker. The HDCP2.2 check is tried first; a
 * non-zero result (e.g. when HDCP2.2 is not the active protocol) falls
 * back to the HDCP1.4 check. Whichever check reports an intact link
 * re-arms the worker with that protocol's check period; if both fail,
 * the worker is not rescheduled.
 */
static void intel_hdcp_check_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
					       struct intel_hdcp,
					       check_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);

	if (!intel_hdcp2_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP2_CHECK_PERIOD_MS);
	else if (!intel_hdcp_check_link(connector))
		schedule_delayed_work(&hdcp->check_work,
				      DRM_HDCP_CHECK_PERIOD_MS);
}
1825 
1826 static int i915_hdcp_component_bind(struct device *i915_kdev,
1827 				    struct device *mei_kdev, void *data)
1828 {
1829 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1830 
1831 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
1832 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1833 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1834 	dev_priv->hdcp_master->mei_dev = mei_kdev;
1835 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1836 
1837 	return 0;
1838 }
1839 
/*
 * Component framework unbind callback: clear the master pointer under
 * hdcp_comp_mutex so subsequent mei wrapper calls bail out with -EINVAL.
 */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->hdcp_comp_mutex);
	dev_priv->hdcp_master = NULL;
	mutex_unlock(&dev_priv->hdcp_comp_mutex);
}
1850 
/* Ops used by the component framework to (un)bind mei_hdcp to i915. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1855 
1856 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
1857 {
1858 	switch (port) {
1859 	case PORT_A:
1860 		return MEI_DDI_A;
1861 	case PORT_B ... PORT_F:
1862 		return (enum mei_fw_ddi)port;
1863 	default:
1864 		return MEI_DDI_INVALID_PORT;
1865 	}
1866 }
1867 
/*
 * Map an i915 transcoder to the ME FW's transcoder index. For A..D the
 * FW value is derived by OR-ing 0x10 onto the i915 enum value (the two
 * enums are assumed to line up apart from that bit); everything else is
 * reported as invalid.
 */
static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A ... TRANSCODER_D:
		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
		return MEI_INVALID_TRANSCODER;
	}
}
1877 
/*
 * Fill in the hdcp_port_data handed to the ME FW for this connector:
 * port/transcoder identification, port type, protocol and the (single)
 * stream descriptor. Returns 0 on success, -ENOMEM if the streams
 * array cannot be allocated.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp_port_data *data = &hdcp->port_data;

	if (INTEL_GEN(dev_priv) < 12)
		data->fw_ddi =
			intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->fw_ddi = MEI_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here fw_tc
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->fw_tc = MEI_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Single stream; the first check guards against re-allocation. */
	data->k = 1;
	if (!data->streams)
		data->streams = kcalloc(data->k,
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	/* Still NULL here means the kcalloc() above failed. */
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}
1920 
1921 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1922 {
1923 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1924 		return false;
1925 
1926 	return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
1927 		IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv));
1928 }
1929 
1930 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
1931 {
1932 	int ret;
1933 
1934 	if (!is_hdcp2_supported(dev_priv))
1935 		return;
1936 
1937 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1938 	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
1939 
1940 	dev_priv->hdcp_comp_added = true;
1941 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1942 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
1943 				  I915_COMPONENT_HDCP);
1944 	if (ret < 0) {
1945 		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
1946 			    ret);
1947 		mutex_lock(&dev_priv->hdcp_comp_mutex);
1948 		dev_priv->hdcp_comp_added = false;
1949 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1950 		return;
1951 	}
1952 }
1953 
/*
 * Initialize the connector's HDCP2.2 state: set up the port data shared
 * with the ME FW and mark the connector as HDCP2.2 capable. Failure is
 * non-fatal - the connector simply stays HDCP1.4-only.
 */
static void intel_hdcp2_init(struct intel_connector *connector,
			     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	ret = initialize_hdcp_port_data(connector, shim);
	if (ret) {
		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
		return;
	}

	hdcp->hdcp2_supported = true;
}
1969 
1970 int intel_hdcp_init(struct intel_connector *connector,
1971 		    const struct intel_hdcp_shim *shim)
1972 {
1973 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1974 	struct intel_hdcp *hdcp = &connector->hdcp;
1975 	int ret;
1976 
1977 	if (!shim)
1978 		return -EINVAL;
1979 
1980 	if (is_hdcp2_supported(dev_priv))
1981 		intel_hdcp2_init(connector, shim);
1982 
1983 	ret =
1984 	drm_connector_attach_content_protection_property(&connector->base,
1985 							 hdcp->hdcp2_supported);
1986 	if (ret) {
1987 		hdcp->hdcp2_supported = false;
1988 		kfree(hdcp->port_data.streams);
1989 		return ret;
1990 	}
1991 
1992 	hdcp->shim = shim;
1993 	mutex_init(&hdcp->mutex);
1994 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
1995 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
1996 	init_waitqueue_head(&hdcp->cp_irq_queue);
1997 
1998 	return 0;
1999 }
2000 
/*
 * intel_hdcp_enable - start HDCP encryption on @connector.
 * @connector: connector to protect
 * @cpu_transcoder: transcoder driving the connector (used on Gen12+)
 * @content_type: requested DRM HDCP content type (Type0/Type1)
 *
 * Tries HDCP2.2 first and falls back to HDCP1.4 for Type0 content.
 * On success the periodic link-check work is scheduled and the
 * property state moves to ENABLED. Returns 0 on success, -ENOENT when
 * the connector has no HDCP support, or the enable failure code.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	/* Enabling while already ENABLED indicates a caller state bug. */
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/* Gen12+ moved HDCP from port to transcoder; record the mapping. */
	if (INTEL_GEN(dev_priv) >= 12) {
		hdcp->cpu_transcoder = cpu_transcoder;
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);
	}

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		/* Poll the link at the interval of the version that won. */
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		schedule_work(&hdcp->prop_work);
	}

	mutex_unlock(&hdcp->mutex);
	return ret;
}
2050 
/*
 * intel_hdcp_disable - stop HDCP encryption on @connector.
 *
 * Moves the property state to UNDESIRED and tears down whichever of
 * HDCP2.2/1.4 is currently encrypting. Returns -ENOENT when the
 * connector has no HDCP support, otherwise the disable result (0 when
 * nothing was encrypted).
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);

	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
		if (hdcp->hdcp2_encrypted)
			ret = _intel_hdcp2_disable(connector);
		else if (hdcp->hdcp_encrypted)
			ret = _intel_hdcp_disable(connector);
	}

	mutex_unlock(&hdcp->mutex);
	/*
	 * check_work takes hdcp->mutex itself, so the synchronous cancel
	 * must happen only after the mutex is dropped to avoid deadlock.
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2073 
2074 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2075 			    struct intel_encoder *encoder,
2076 			    const struct intel_crtc_state *crtc_state,
2077 			    const struct drm_connector_state *conn_state)
2078 {
2079 	struct intel_connector *connector =
2080 				to_intel_connector(conn_state->connector);
2081 	struct intel_hdcp *hdcp = &connector->hdcp;
2082 	bool content_protection_type_changed =
2083 		(conn_state->hdcp_content_type != hdcp->content_type &&
2084 		 conn_state->content_protection !=
2085 		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2086 
2087 	/*
2088 	 * During the HDCP encryption session if Type change is requested,
2089 	 * disable the HDCP and reenable it with new TYPE value.
2090 	 */
2091 	if (conn_state->content_protection ==
2092 	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2093 	    content_protection_type_changed)
2094 		intel_hdcp_disable(connector);
2095 
2096 	/*
2097 	 * Mark the hdcp state as DESIRED after the hdcp disable of type
2098 	 * change procedure.
2099 	 */
2100 	if (content_protection_type_changed) {
2101 		mutex_lock(&hdcp->mutex);
2102 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2103 		schedule_work(&hdcp->prop_work);
2104 		mutex_unlock(&hdcp->mutex);
2105 	}
2106 
2107 	if (conn_state->content_protection ==
2108 	    DRM_MODE_CONTENT_PROTECTION_DESIRED ||
2109 	    content_protection_type_changed)
2110 		intel_hdcp_enable(connector,
2111 				  crtc_state->cpu_transcoder,
2112 				  (u8)conn_state->hdcp_content_type);
2113 }
2114 
2115 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2116 {
2117 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2118 	if (!dev_priv->hdcp_comp_added) {
2119 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2120 		return;
2121 	}
2122 
2123 	dev_priv->hdcp_comp_added = false;
2124 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2125 
2126 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2127 }
2128 
2129 void intel_hdcp_cleanup(struct intel_connector *connector)
2130 {
2131 	if (!connector->hdcp.shim)
2132 		return;
2133 
2134 	mutex_lock(&connector->hdcp.mutex);
2135 	kfree(connector->hdcp.port_data.streams);
2136 	mutex_unlock(&connector->hdcp.mutex);
2137 }
2138 
2139 void intel_hdcp_atomic_check(struct drm_connector *connector,
2140 			     struct drm_connector_state *old_state,
2141 			     struct drm_connector_state *new_state)
2142 {
2143 	u64 old_cp = old_state->content_protection;
2144 	u64 new_cp = new_state->content_protection;
2145 	struct drm_crtc_state *crtc_state;
2146 
2147 	if (!new_state->crtc) {
2148 		/*
2149 		 * If the connector is being disabled with CP enabled, mark it
2150 		 * desired so it's re-enabled when the connector is brought back
2151 		 */
2152 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2153 			new_state->content_protection =
2154 				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2155 		return;
2156 	}
2157 
2158 	/*
2159 	 * Nothing to do if the state didn't change, or HDCP was activated since
2160 	 * the last commit. And also no change in hdcp content type.
2161 	 */
2162 	if (old_cp == new_cp ||
2163 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2164 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2165 		if (old_state->hdcp_content_type ==
2166 				new_state->hdcp_content_type)
2167 			return;
2168 	}
2169 
2170 	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2171 						   new_state->crtc);
2172 	crtc_state->mode_changed = true;
2173 }
2174 
2175 /* Handles the CP_IRQ raised from the DP HDCP sink */
2176 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2177 {
2178 	struct intel_hdcp *hdcp = &connector->hdcp;
2179 
2180 	if (!hdcp->shim)
2181 		return;
2182 
2183 	atomic_inc(&connector->hdcp.cp_irq_count);
2184 	wake_up_all(&connector->hdcp.cp_irq_queue);
2185 
2186 	schedule_delayed_work(&hdcp->check_work, 0);
2187 }
2188