1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_display_power.h"
21 #include "intel_display_types.h"
22 #include "intel_hdcp.h"
23 #include "intel_sideband.h"
24 #include "intel_connector.h"
25 
26 #define KEY_LOAD_TRIES	5
27 #define HDCP2_LC_RETRY_CNT			3
28 
29 static int intel_conn_to_vcpi(struct intel_connector *connector)
30 {
31 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
32 	return connector->port	? connector->port->vcpi.vcpi : 0;
33 }
34 
/*
 * intel_hdcp_required_content_stream selects the highest common HDCP
 * content_type supported by all streams in the DP MST topology, because the
 * security f/w has no provision to mark the content_type of each stream
 * separately: it marks all available streams with the content_type provided
 * at the time of port authentication. This may prevent userspace from using
 * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
 * topology are not HDCP 2.2 capable. Though it is not compulsory, the
 * security fw should change its policy to allow different content_types for
 * different streams.
 */
45 static int
46 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
47 {
48 	struct drm_connector_list_iter conn_iter;
49 	struct intel_digital_port *conn_dig_port;
50 	struct intel_connector *connector;
51 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
52 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
53 	bool enforce_type0 = false;
54 	int k;
55 
56 	data->k = 0;
57 
58 	if (dig_port->hdcp_auth_status)
59 		return 0;
60 
61 	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
62 	for_each_intel_connector_iter(connector, &conn_iter) {
63 		if (connector->base.status == connector_status_disconnected)
64 			continue;
65 
66 		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
67 			continue;
68 
69 		conn_dig_port = intel_attached_dig_port(connector);
70 		if (conn_dig_port != dig_port)
71 			continue;
72 
73 		if (!enforce_type0 && !intel_hdcp2_capable(connector))
74 			enforce_type0 = true;
75 
76 		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
77 		data->k++;
78 
79 		/* if there is only one active stream */
80 		if (dig_port->dp.active_mst_links <= 1)
81 			break;
82 	}
83 	drm_connector_list_iter_end(&conn_iter);
84 
85 	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
86 		return -EINVAL;
87 
88 	/*
89 	 * Apply common protection level across all streams in DP MST Topology.
90 	 * Use highest supported content type for all streams in DP MST Topology.
91 	 */
92 	for (k = 0; k < data->k; k++)
93 		data->streams[k].stream_type =
94 			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
95 
96 	return 0;
97 }
98 
99 static
100 bool intel_hdcp_is_ksv_valid(u8 *ksv)
101 {
102 	int i, ones = 0;
103 	/* KSV has 20 1's and 20 0's */
104 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
105 		ones += hweight8(ksv[i]);
106 	if (ones != 20)
107 		return false;
108 
109 	return true;
110 }
111 
112 static
113 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
114 			       const struct intel_hdcp_shim *shim, u8 *bksv)
115 {
116 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
117 	int ret, i, tries = 2;
118 
119 	/* HDCP spec states that we must retry the bksv if it is invalid */
120 	for (i = 0; i < tries; i++) {
121 		ret = shim->read_bksv(dig_port, bksv);
122 		if (ret)
123 			return ret;
124 		if (intel_hdcp_is_ksv_valid(bksv))
125 			break;
126 	}
127 	if (i == tries) {
128 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
129 		return -ENODEV;
130 	}
131 
132 	return 0;
133 }
134 
135 /* Is HDCP1.4 capable on Platform and Sink */
136 bool intel_hdcp_capable(struct intel_connector *connector)
137 {
138 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
139 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
140 	bool capable = false;
141 	u8 bksv[5];
142 
143 	if (!shim)
144 		return capable;
145 
146 	if (shim->hdcp_capable) {
147 		shim->hdcp_capable(dig_port, &capable);
148 	} else {
149 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
150 			capable = true;
151 	}
152 
153 	return capable;
154 }
155 
156 /* Is HDCP2.2 capable on Platform and Sink */
157 bool intel_hdcp2_capable(struct intel_connector *connector)
158 {
159 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
160 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
161 	struct intel_hdcp *hdcp = &connector->hdcp;
162 	bool capable = false;
163 
164 	/* I915 support for HDCP2.2 */
165 	if (!hdcp->hdcp2_supported)
166 		return false;
167 
168 	/* MEI interface is solid */
169 	mutex_lock(&dev_priv->hdcp_comp_mutex);
170 	if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
171 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
172 		return false;
173 	}
174 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
175 
176 	/* Sink's capability for HDCP2.2 */
177 	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
178 
179 	return capable;
180 }
181 
182 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
183 			      enum transcoder cpu_transcoder, enum port port)
184 {
185 	return intel_de_read(dev_priv,
186 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
187 	       HDCP_STATUS_ENC;
188 }
189 
190 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
191 			       enum transcoder cpu_transcoder, enum port port)
192 {
193 	return intel_de_read(dev_priv,
194 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
195 	       LINK_ENCRYPTION_STATUS;
196 }
197 
/*
 * Wait for the repeater's KSV list to become ready.
 *
 * Repeatedly calls shim->read_ksv_ready() via __wait_for() with a 5 second
 * overall timeout, as the HDCP spec allows; the remaining __wait_for()
 * arguments set the poll backoff (ramping from ~1ms to ~100ms — see the
 * __wait_for() macro for exact semantics).
 *
 * Returns 0 when ready, the poll/read error, or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	/* A failed DPCD/DDC read takes precedence over the ready status. */
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
218 
219 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
220 {
221 	enum i915_power_well_id id;
222 	intel_wakeref_t wakeref;
223 	bool enabled = false;
224 
225 	/*
226 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
227 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
228 	 */
229 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
230 		id = HSW_DISP_PW_GLOBAL;
231 	else
232 		id = SKL_DISP_PW_1;
233 
234 	/* PG1 (power well #1) needs to be enabled */
235 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
236 		enabled = intel_display_power_well_is_enabled(dev_priv, id);
237 
238 	/*
239 	 * Another req for hdcp key loadability is enabled state of pll for
240 	 * cdclk. Without active crtc we wont land here. So we are assuming that
241 	 * cdclk is already on.
242 	 */
243 
244 	return enabled;
245 }
246 
247 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
248 {
249 	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
250 	intel_de_write(dev_priv, HDCP_KEY_STATUS,
251 		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
252 }
253 
/*
 * Load the HDCP 1.4 keys into the display hardware.
 *
 * Returns 0 once HDCP_KEY_STATUS reports the keys loaded and valid, -ENXIO
 * when the keys failed to load (or were never loaded on HSW/BDW), or a
 * pcode/register-wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Skip the load entirely if the keys are already in place. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		/* Load completed but the keys are reported bad. */
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
305 
/*
 * Push one 32-bit word of the SHA-1 input stream into the hardware and wait
 * for it to be consumed. Returns 0 on success or -ETIMEDOUT if the hardware
 * never raised HDCP_SHA1_READY. (Despite the old comment, this does NOT
 * return an updated SHA-1 index — callers advance sha_idx themselves.)
 */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}
316 
/*
 * Select the HDCP_REP_CTL "repeater present" and SHA1 M0-source bits for the
 * authenticating link. Gen12+ routes HDCP per transcoder; older hardware
 * routes it per DDI port.
 *
 * NOTE(review): on an unknown transcoder/port the negative -EINVAL is
 * implicitly converted to a large u32 by the return type; callers here only
 * OR the value into HDCP_REP_CTL, but this conversion looks worth confirming.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
358 
/*
 * Compute V and compare it against the repeater's V' (HDCP 1.4 Part 2).
 *
 * The receiver's V' parts are loaded into HDCP_SHA_V_PRIME, then the KSV
 * list || BINFO/BSTATUS || M0 byte stream is fed through HDCP_SHA_TEXT in
 * 32-bit big-endian words. M0 is known only to the hardware and is inserted
 * by it, steered by the HDCP_SHA1_TEXT_* size field written to HDCP_REP_CTL
 * before each word. The stream is then zero-padded to the 64-byte SHA-1
 * block boundary, the bit length is appended, and the hardware reports
 * whether V matched V'.
 *
 * Returns 0 on match, -ETIMEDOUT on a hardware timeout, -EINVAL on an
 * impossible leftover count, -ENXIO on a SHA-1 mismatch.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			/*
			 * NOTE(review): ksv[j] is promoted to int before the
			 * shift, so a shift into bit 31 relies on the kernel's
			 * signed-overflow build flags being in effect.
			 */
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
601 
/*
 * Implements Part 2 of the HDCP 1.4 authorization procedure (repeater
 * authentication): wait for the downstream KSV FIFO, validate the topology
 * limits reported in BSTATUS, read the KSV list, reject revoked KSVs, and
 * validate V' (retrying up to three times, as the DP spec mandates at least
 * two re-reads on mismatch).
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* The HDCP spec caps the device count and cascade depth of repeaters. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Refuse to authenticate if any downstream KSV is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		/* ret still holds the error from the final attempt. */
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
685 
/*
 * Implements Part 1 of the HDCP 1.4 authorization procedure:
 * generate/capture An, exchange An/Aksv with the sink, read and validate
 * Bksv (rejecting revoked KSVs), enable signalling and encryption, then
 * verify R0 == R0'. On success, hands off to Part 2 when the sink reports
 * itself as a repeater.
 *
 * Returns 0 on success or a negative error code (-EINVAL, -EPERM,
 * -ETIMEDOUT, or a shim error).
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Register-pair and byte views of An/Bksv/Ri', shared with the shim. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' availability window is measured from the Aksv write. */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Refuse to authenticate a sink whose Bksv is on the SRM. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
858 
/*
 * Disable HDCP 1.4 on the connector: stop per-stream encryption (MST),
 * tear down port encryption, clear the repeater-control bits and drop
 * HDCP signalling.
 *
 * Returns 0 on success, -ETIMEDOUT if the status register never cleared,
 * or the shim error from disabling stream encryption/signalling.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	/* Wait for the entire status register to clear, not just ENC. */
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	/* Remove only this link's bits from the shared HDCP_REP_CTL. */
	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
914 
915 static int _intel_hdcp_enable(struct intel_connector *connector)
916 {
917 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
918 	struct intel_hdcp *hdcp = &connector->hdcp;
919 	int i, ret, tries = 3;
920 
921 	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
922 		    connector->base.name, connector->base.base.id);
923 
924 	if (!hdcp_key_loadable(dev_priv)) {
925 		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
926 		return -ENXIO;
927 	}
928 
929 	for (i = 0; i < KEY_LOAD_TRIES; i++) {
930 		ret = intel_hdcp_load_keys(dev_priv);
931 		if (!ret)
932 			break;
933 		intel_hdcp_clear_keys(dev_priv);
934 	}
935 	if (ret) {
936 		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
937 			ret);
938 		return ret;
939 	}
940 
941 	/* Incase of authentication failures, HDCP spec expects reauth. */
942 	for (i = 0; i < tries; i++) {
943 		ret = intel_hdcp_auth(connector);
944 		if (!ret) {
945 			hdcp->hdcp_encrypted = true;
946 			return 0;
947 		}
948 
949 		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
950 
951 		/* Ensuring HDCP encryption and signalling are stopped. */
952 		_intel_hdcp_disable(connector);
953 	}
954 
955 	drm_dbg_kms(&dev_priv->drm,
956 		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
957 	return ret;
958 }
959 
/* Map an embedded &struct intel_hdcp back to its containing connector. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
964 
/*
 * Update the cached content-protection value, maintain the per-port count
 * of HDCP-enabled streams, and optionally schedule the connector property
 * update work.
 *
 * Caller must hold hdcp->mutex; dig_port->hdcp_mutex is additionally
 * required whenever the value actually changes (hence the second WARN
 * after the early return).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Track how many of this port's streams are currently protected. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference presumably released by prop_work — verify there. */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
992 
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Lock order: per-connector hdcp mutex first, then the port-wide one. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Software believes encryption is on but the hardware status register
	 * disagrees: report DESIRED so userspace sees the protection drop.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Link integrity still good: (re-)assert ENABLED and we are done. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link check failed: tear HDCP down and run a full reauthentication. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1063 
/*
 * Deferred worker that copies hdcp->value into the connector's
 * content-protection property. Runs from the workqueue because the
 * property update needs connection_mutex, which the schedulers of this
 * work may not be able to take.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	/* drm_hdcp_update_content_protection() requires connection_mutex. */
	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Drop the reference taken by intel_hdcp_update_value(). */
	drm_connector_put(&connector->base);
}
1088 
1089 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1090 {
1091 	return INTEL_INFO(dev_priv)->display.has_hdcp &&
1092 			(INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
1093 }
1094 
1095 static int
1096 hdcp2_prepare_ake_init(struct intel_connector *connector,
1097 		       struct hdcp2_ake_init *ake_data)
1098 {
1099 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1100 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1101 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1102 	struct i915_hdcp_comp_master *comp;
1103 	int ret;
1104 
1105 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1106 	comp = dev_priv->hdcp_master;
1107 
1108 	if (!comp || !comp->ops) {
1109 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1110 		return -EINVAL;
1111 	}
1112 
1113 	ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1114 	if (ret)
1115 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1116 			    ret);
1117 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1118 
1119 	return ret;
1120 }
1121 
1122 static int
1123 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1124 				struct hdcp2_ake_send_cert *rx_cert,
1125 				bool *paired,
1126 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1127 				size_t *msg_sz)
1128 {
1129 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1130 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1131 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1132 	struct i915_hdcp_comp_master *comp;
1133 	int ret;
1134 
1135 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1136 	comp = dev_priv->hdcp_master;
1137 
1138 	if (!comp || !comp->ops) {
1139 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1140 		return -EINVAL;
1141 	}
1142 
1143 	ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1144 							 rx_cert, paired,
1145 							 ek_pub_km, msg_sz);
1146 	if (ret < 0)
1147 		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1148 			    ret);
1149 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1150 
1151 	return ret;
1152 }
1153 
1154 static int hdcp2_verify_hprime(struct intel_connector *connector,
1155 			       struct hdcp2_ake_send_hprime *rx_hprime)
1156 {
1157 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1158 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1159 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1160 	struct i915_hdcp_comp_master *comp;
1161 	int ret;
1162 
1163 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1164 	comp = dev_priv->hdcp_master;
1165 
1166 	if (!comp || !comp->ops) {
1167 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1168 		return -EINVAL;
1169 	}
1170 
1171 	ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1172 	if (ret < 0)
1173 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1174 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1175 
1176 	return ret;
1177 }
1178 
1179 static int
1180 hdcp2_store_pairing_info(struct intel_connector *connector,
1181 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1182 {
1183 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1184 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1185 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1186 	struct i915_hdcp_comp_master *comp;
1187 	int ret;
1188 
1189 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1190 	comp = dev_priv->hdcp_master;
1191 
1192 	if (!comp || !comp->ops) {
1193 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1194 		return -EINVAL;
1195 	}
1196 
1197 	ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1198 	if (ret < 0)
1199 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1200 			    ret);
1201 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1202 
1203 	return ret;
1204 }
1205 
1206 static int
1207 hdcp2_prepare_lc_init(struct intel_connector *connector,
1208 		      struct hdcp2_lc_init *lc_init)
1209 {
1210 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1211 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1212 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1213 	struct i915_hdcp_comp_master *comp;
1214 	int ret;
1215 
1216 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1217 	comp = dev_priv->hdcp_master;
1218 
1219 	if (!comp || !comp->ops) {
1220 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1221 		return -EINVAL;
1222 	}
1223 
1224 	ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1225 	if (ret < 0)
1226 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1227 			    ret);
1228 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1229 
1230 	return ret;
1231 }
1232 
1233 static int
1234 hdcp2_verify_lprime(struct intel_connector *connector,
1235 		    struct hdcp2_lc_send_lprime *rx_lprime)
1236 {
1237 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1238 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1239 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1240 	struct i915_hdcp_comp_master *comp;
1241 	int ret;
1242 
1243 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1244 	comp = dev_priv->hdcp_master;
1245 
1246 	if (!comp || !comp->ops) {
1247 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1248 		return -EINVAL;
1249 	}
1250 
1251 	ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1252 	if (ret < 0)
1253 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1254 			    ret);
1255 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1256 
1257 	return ret;
1258 }
1259 
1260 static int hdcp2_prepare_skey(struct intel_connector *connector,
1261 			      struct hdcp2_ske_send_eks *ske_data)
1262 {
1263 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1264 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1265 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1266 	struct i915_hdcp_comp_master *comp;
1267 	int ret;
1268 
1269 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1270 	comp = dev_priv->hdcp_master;
1271 
1272 	if (!comp || !comp->ops) {
1273 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1274 		return -EINVAL;
1275 	}
1276 
1277 	ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1278 	if (ret < 0)
1279 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1280 			    ret);
1281 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1282 
1283 	return ret;
1284 }
1285 
1286 static int
1287 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1288 				      struct hdcp2_rep_send_receiverid_list
1289 								*rep_topology,
1290 				      struct hdcp2_rep_send_ack *rep_send_ack)
1291 {
1292 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1293 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1294 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1295 	struct i915_hdcp_comp_master *comp;
1296 	int ret;
1297 
1298 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1299 	comp = dev_priv->hdcp_master;
1300 
1301 	if (!comp || !comp->ops) {
1302 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1303 		return -EINVAL;
1304 	}
1305 
1306 	ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1307 							 rep_topology,
1308 							 rep_send_ack);
1309 	if (ret < 0)
1310 		drm_dbg_kms(&dev_priv->drm,
1311 			    "Verify rep topology failed. %d\n", ret);
1312 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1313 
1314 	return ret;
1315 }
1316 
1317 static int
1318 hdcp2_verify_mprime(struct intel_connector *connector,
1319 		    struct hdcp2_rep_stream_ready *stream_ready)
1320 {
1321 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1322 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1323 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1324 	struct i915_hdcp_comp_master *comp;
1325 	int ret;
1326 
1327 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1328 	comp = dev_priv->hdcp_master;
1329 
1330 	if (!comp || !comp->ops) {
1331 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1332 		return -EINVAL;
1333 	}
1334 
1335 	ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1336 	if (ret < 0)
1337 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1338 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1339 
1340 	return ret;
1341 }
1342 
1343 static int hdcp2_authenticate_port(struct intel_connector *connector)
1344 {
1345 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1346 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1347 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1348 	struct i915_hdcp_comp_master *comp;
1349 	int ret;
1350 
1351 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1352 	comp = dev_priv->hdcp_master;
1353 
1354 	if (!comp || !comp->ops) {
1355 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1356 		return -EINVAL;
1357 	}
1358 
1359 	ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1360 	if (ret < 0)
1361 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1362 			    ret);
1363 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1364 
1365 	return ret;
1366 }
1367 
1368 static int hdcp2_close_mei_session(struct intel_connector *connector)
1369 {
1370 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1371 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1372 	struct i915_hdcp_comp_master *comp;
1373 	int ret;
1374 
1375 	mutex_lock(&dev_priv->hdcp_comp_mutex);
1376 	comp = dev_priv->hdcp_master;
1377 
1378 	if (!comp || !comp->ops) {
1379 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
1380 		return -EINVAL;
1381 	}
1382 
1383 	ret = comp->ops->close_hdcp_session(comp->mei_dev,
1384 					     &dig_port->hdcp_port_data);
1385 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
1386 
1387 	return ret;
1388 }
1389 
/* Deauthenticating a port amounts to closing its fw/mei HDCP session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1394 
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* AKE messages are exchanged strictly one at a time; share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* Get AKE_Init from the security fw and send it to the receiver. */
	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	/* Receiver answers with its certificate and capabilities. */
	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Reject receivers on the system revocation list (SRM). */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	/* size reflects whether a stored- or no-stored-km msg was prepared. */
	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1484 
1485 static int hdcp2_locality_check(struct intel_connector *connector)
1486 {
1487 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1488 	struct intel_hdcp *hdcp = &connector->hdcp;
1489 	union {
1490 		struct hdcp2_lc_init lc_init;
1491 		struct hdcp2_lc_send_lprime send_lprime;
1492 	} msgs;
1493 	const struct intel_hdcp_shim *shim = hdcp->shim;
1494 	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1495 
1496 	for (i = 0; i < tries; i++) {
1497 		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1498 		if (ret < 0)
1499 			continue;
1500 
1501 		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1502 				      sizeof(msgs.lc_init));
1503 		if (ret < 0)
1504 			continue;
1505 
1506 		ret = shim->read_2_2_msg(dig_port,
1507 					 HDCP_2_2_LC_SEND_LPRIME,
1508 					 &msgs.send_lprime,
1509 					 sizeof(msgs.send_lprime));
1510 		if (ret < 0)
1511 			continue;
1512 
1513 		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1514 		if (!ret)
1515 			break;
1516 	}
1517 
1518 	return ret;
1519 }
1520 
1521 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1522 {
1523 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1524 	struct intel_hdcp *hdcp = &connector->hdcp;
1525 	struct hdcp2_ske_send_eks send_eks;
1526 	int ret;
1527 
1528 	ret = hdcp2_prepare_skey(connector, &send_eks);
1529 	if (ret < 0)
1530 		return ret;
1531 
1532 	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1533 					sizeof(send_eks));
1534 	if (ret < 0)
1535 		return ret;
1536 
1537 	return 0;
1538 }
1539 
/*
 * Build and send one RepeaterAuth_Stream_Manage message covering all
 * data->k streams, then read and verify the repeater's Stream_Ready (M')
 * response. seq_num_m is incremented on every attempt, pass or fail.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* seq_num_m exhausted: caller must restart full authentication. */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Trim the unused stream slots from the wire message. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	/* Let the fw verify M' against the seq_num_m we just used. */
	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* Advance seq_num_m even on failure, per the HDCP 2.2 spec. */
	hdcp->seq_num_m++;

	return ret;
}
1589 
/*
 * Read the repeater's ReceiverID_List, sanity-check the topology and
 * seq_num_v, screen the list against the revocation list, then have the
 * fw verify it and send back the RepeaterAuth_Send_Ack.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* Before encryption is live, the very first list must carry 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* Only commit seq_num_v once the fw accepted the topology. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1657 
1658 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1659 {
1660 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1661 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1662 	struct intel_hdcp *hdcp = &connector->hdcp;
1663 	const struct intel_hdcp_shim *shim = hdcp->shim;
1664 	int ret;
1665 
1666 	ret = hdcp2_authentication_key_exchange(connector);
1667 	if (ret < 0) {
1668 		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1669 		return ret;
1670 	}
1671 
1672 	ret = hdcp2_locality_check(connector);
1673 	if (ret < 0) {
1674 		drm_dbg_kms(&i915->drm,
1675 			    "Locality Check failed. Err : %d\n", ret);
1676 		return ret;
1677 	}
1678 
1679 	ret = hdcp2_session_key_exchange(connector);
1680 	if (ret < 0) {
1681 		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1682 		return ret;
1683 	}
1684 
1685 	if (shim->config_stream_type) {
1686 		ret = shim->config_stream_type(dig_port,
1687 					       hdcp->is_repeater,
1688 					       hdcp->content_type);
1689 		if (ret < 0)
1690 			return ret;
1691 	}
1692 
1693 	if (hdcp->is_repeater) {
1694 		ret = hdcp2_authenticate_repeater_topology(connector);
1695 		if (ret < 0) {
1696 			drm_dbg_kms(&i915->drm,
1697 				    "Repeater Auth Failed. Err: %d\n", ret);
1698 			return ret;
1699 		}
1700 	}
1701 
1702 	return ret;
1703 }
1704 
1705 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1706 {
1707 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1708 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1709 	struct intel_hdcp *hdcp = &connector->hdcp;
1710 	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1711 	enum port port = dig_port->base.port;
1712 	int ret = 0;
1713 
1714 	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1715 			    LINK_ENCRYPTION_STATUS)) {
1716 		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1717 			connector->base.name, connector->base.base.id);
1718 		return -EPERM;
1719 	}
1720 
1721 	if (hdcp->shim->stream_2_2_encryption) {
1722 		ret = hdcp->shim->stream_2_2_encryption(connector, true);
1723 		if (ret) {
1724 			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1725 				connector->base.name, connector->base.base.id);
1726 			return ret;
1727 		}
1728 		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1729 			    transcoder_name(hdcp->stream_transcoder));
1730 	}
1731 
1732 	return ret;
1733 }
1734 
/*
 * Enable HDCP 2.2 link encryption: assert signalling if the shim needs it,
 * request encryption once the hardware reports the link authenticated, and
 * wait for the encryption status bit to latch.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	/*
	 * NOTE(review): auth_status is set even when the wait above timed
	 * out (ret != 0) — confirm that is intended before relying on it.
	 */
	dig_port->hdcp_auth_status = true;

	return ret;
}
1775 
/*
 * Disable HDCP 2.2 link encryption: drop the encryption request bit, wait
 * for the status bit to clear, then deassert signalling if the shim
 * provides a hook.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be on when we get here. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	/* A timeout is only logged; signalling teardown still proceeds. */
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1812 
1813 static int
1814 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1815 {
1816 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1817 	int i, tries = 3, ret;
1818 
1819 	if (!connector->hdcp.is_repeater)
1820 		return 0;
1821 
1822 	for (i = 0; i < tries; i++) {
1823 		ret = _hdcp2_propagate_stream_management_info(connector);
1824 		if (!ret)
1825 			break;
1826 
1827 		/* Lets restart the auth incase of seq_num_m roll over */
1828 		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1829 			drm_dbg_kms(&i915->drm,
1830 				    "seq_num_m roll over.(%d)\n", ret);
1831 			break;
1832 		}
1833 
1834 		drm_dbg_kms(&i915->drm,
1835 			    "HDCP2 stream management %d of %d Failed.(%d)\n",
1836 			    i + 1, tries, ret);
1837 	}
1838 
1839 	return ret;
1840 }
1841 
1842 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1843 {
1844 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1845 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1846 	int ret = 0, i, tries = 3;
1847 
1848 	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1849 		ret = hdcp2_authenticate_sink(connector);
1850 		if (!ret) {
1851 			ret = hdcp2_propagate_stream_management_info(connector);
1852 			if (ret) {
1853 				drm_dbg_kms(&i915->drm,
1854 					    "Stream management failed.(%d)\n",
1855 					    ret);
1856 				break;
1857 			}
1858 
1859 			ret = hdcp2_authenticate_port(connector);
1860 			if (!ret)
1861 				break;
1862 			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1863 				    ret);
1864 		}
1865 
1866 		/* Clearing the mei hdcp session */
1867 		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1868 			    i + 1, tries, ret);
1869 		if (hdcp2_deauthenticate_port(connector) < 0)
1870 			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1871 	}
1872 
1873 	if (!ret && !dig_port->hdcp_auth_status) {
1874 		/*
1875 		 * Ensuring the required 200mSec min time interval between
1876 		 * Session Key Exchange and encryption.
1877 		 */
1878 		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1879 		ret = hdcp2_enable_encryption(connector);
1880 		if (ret < 0) {
1881 			drm_dbg_kms(&i915->drm,
1882 				    "Encryption Enable Failed.(%d)\n", ret);
1883 			if (hdcp2_deauthenticate_port(connector) < 0)
1884 				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1885 		}
1886 	}
1887 
1888 	ret = hdcp2_enable_stream_encryption(connector);
1889 
1890 	return ret;
1891 }
1892 
1893 static int _intel_hdcp2_enable(struct intel_connector *connector)
1894 {
1895 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1896 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1897 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1898 	struct intel_hdcp *hdcp = &connector->hdcp;
1899 	int ret;
1900 
1901 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1902 		    connector->base.name, connector->base.base.id,
1903 		    hdcp->content_type);
1904 
1905 	/* Stream which requires encryption */
1906 	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
1907 		data->k = 1;
1908 		data->streams[0].stream_type = hdcp->content_type;
1909 	} else {
1910 		ret = intel_hdcp_required_content_stream(dig_port);
1911 		if (ret)
1912 			return ret;
1913 	}
1914 
1915 	ret = hdcp2_authenticate_and_encrypt(connector);
1916 	if (ret) {
1917 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1918 			    hdcp->content_type, ret);
1919 		return ret;
1920 	}
1921 
1922 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1923 		    connector->base.name, connector->base.base.id,
1924 		    hdcp->content_type);
1925 
1926 	hdcp->hdcp2_encrypted = true;
1927 	return 0;
1928 }
1929 
/*
 * Disable HDCP 2.2 for this connector: stop its stream encryption first
 * and, only when no other stream on the port still uses HDCP, tear down
 * link encryption and the fw session as well.
 */
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Other MST streams still need the port; keep the link up. */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	/* Port-level teardown: force a full reauth on the next enable. */
	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
1966 
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Re-validates an active HDCP2.2 session: first verifies the HW still
 * reports encryption enabled, then asks the shim to check the link. On a
 * downstream topology change only the repeater authentication is redone;
 * on any other link failure the whole HDCP2.2 session is torn down and
 * re-established. Whenever recovery is needed or fails, the uapi property
 * is downgraded to DESIRED (with a userspace update).
 *
 * Returns 0 when the link is (or has been restored to be) protected, a
 * negative error code otherwise. Scheduled from intel_hdcp_check_work.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW lost encryption behind our back: flag DESIRED and bail. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		/* Link healthy: make sure the property reflects ENABLED. */
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		/* Only the repeater topology needs re-authentication. */
		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Full recovery: disable then re-enable the HDCP2.2 session. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2060 
2061 static void intel_hdcp_check_work(struct work_struct *work)
2062 {
2063 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2064 					       struct intel_hdcp,
2065 					       check_work);
2066 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2067 
2068 	if (drm_connector_is_unregistered(&connector->base))
2069 		return;
2070 
2071 	if (!intel_hdcp2_check_link(connector))
2072 		schedule_delayed_work(&hdcp->check_work,
2073 				      DRM_HDCP2_CHECK_PERIOD_MS);
2074 	else if (!intel_hdcp_check_link(connector))
2075 		schedule_delayed_work(&hdcp->check_work,
2076 				      DRM_HDCP_CHECK_PERIOD_MS);
2077 }
2078 
2079 static int i915_hdcp_component_bind(struct device *i915_kdev,
2080 				    struct device *mei_kdev, void *data)
2081 {
2082 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2083 
2084 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2085 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2086 	dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
2087 	dev_priv->hdcp_master->mei_dev = mei_kdev;
2088 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2089 
2090 	return 0;
2091 }
2092 
2093 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2094 				       struct device *mei_kdev, void *data)
2095 {
2096 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2097 
2098 	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2099 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2100 	dev_priv->hdcp_master = NULL;
2101 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2102 }
2103 
/* Ops for the typed HDCP component registered in intel_hdcp_component_init(). */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2108 
2109 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2110 {
2111 	switch (port) {
2112 	case PORT_A:
2113 		return MEI_DDI_A;
2114 	case PORT_B ... PORT_F:
2115 		return (enum mei_fw_ddi)port;
2116 	default:
2117 		return MEI_DDI_INVALID_PORT;
2118 	}
2119 }
2120 
2121 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2122 {
2123 	switch (cpu_transcoder) {
2124 	case TRANSCODER_A ... TRANSCODER_D:
2125 		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2126 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2127 		return MEI_INVALID_TRANSCODER;
2128 	}
2129 }
2130 
2131 static int initialize_hdcp_port_data(struct intel_connector *connector,
2132 				     struct intel_digital_port *dig_port,
2133 				     const struct intel_hdcp_shim *shim)
2134 {
2135 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2136 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2137 	struct intel_hdcp *hdcp = &connector->hdcp;
2138 	enum port port = dig_port->base.port;
2139 
2140 	if (INTEL_GEN(dev_priv) < 12)
2141 		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2142 	else
2143 		/*
2144 		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
2145 		 * with zero(INVALID PORT index).
2146 		 */
2147 		data->fw_ddi = MEI_DDI_INVALID_PORT;
2148 
2149 	/*
2150 	 * As associated transcoder is set and modified at modeset, here fw_tc
2151 	 * is initialized to zero (invalid transcoder index). This will be
2152 	 * retained for <Gen12 forever.
2153 	 */
2154 	data->fw_tc = MEI_INVALID_TRANSCODER;
2155 
2156 	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2157 	data->protocol = (u8)shim->protocol;
2158 
2159 	if (!data->streams)
2160 		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2161 					sizeof(struct hdcp2_streamid_type),
2162 					GFP_KERNEL);
2163 	if (!data->streams) {
2164 		drm_err(&dev_priv->drm, "Out of Memory\n");
2165 		return -ENOMEM;
2166 	}
2167 	/* For SST */
2168 	data->streams[0].stream_id = 0;
2169 	data->streams[0].stream_type = hdcp->content_type;
2170 
2171 	return 0;
2172 }
2173 
2174 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2175 {
2176 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2177 		return false;
2178 
2179 	return (INTEL_GEN(dev_priv) >= 10 ||
2180 		IS_GEMINILAKE(dev_priv) ||
2181 		IS_KABYLAKE(dev_priv) ||
2182 		IS_COFFEELAKE(dev_priv) ||
2183 		IS_COMETLAKE(dev_priv));
2184 }
2185 
2186 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2187 {
2188 	int ret;
2189 
2190 	if (!is_hdcp2_supported(dev_priv))
2191 		return;
2192 
2193 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2194 	drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2195 
2196 	dev_priv->hdcp_comp_added = true;
2197 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2198 	ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2199 				  I915_COMPONENT_HDCP);
2200 	if (ret < 0) {
2201 		drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2202 			    ret);
2203 		mutex_lock(&dev_priv->hdcp_comp_mutex);
2204 		dev_priv->hdcp_comp_added = false;
2205 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2206 		return;
2207 	}
2208 }
2209 
2210 static void intel_hdcp2_init(struct intel_connector *connector,
2211 			     struct intel_digital_port *dig_port,
2212 			     const struct intel_hdcp_shim *shim)
2213 {
2214 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2215 	struct intel_hdcp *hdcp = &connector->hdcp;
2216 	int ret;
2217 
2218 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2219 	if (ret) {
2220 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2221 		return;
2222 	}
2223 
2224 	hdcp->hdcp2_supported = true;
2225 }
2226 
2227 int intel_hdcp_init(struct intel_connector *connector,
2228 		    struct intel_digital_port *dig_port,
2229 		    const struct intel_hdcp_shim *shim)
2230 {
2231 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2232 	struct intel_hdcp *hdcp = &connector->hdcp;
2233 	int ret;
2234 
2235 	if (!shim)
2236 		return -EINVAL;
2237 
2238 	if (is_hdcp2_supported(dev_priv))
2239 		intel_hdcp2_init(connector, dig_port, shim);
2240 
2241 	ret =
2242 	drm_connector_attach_content_protection_property(&connector->base,
2243 							 hdcp->hdcp2_supported);
2244 	if (ret) {
2245 		hdcp->hdcp2_supported = false;
2246 		kfree(dig_port->hdcp_port_data.streams);
2247 		return ret;
2248 	}
2249 
2250 	hdcp->shim = shim;
2251 	mutex_init(&hdcp->mutex);
2252 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2253 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2254 	init_waitqueue_head(&hdcp->cp_irq_queue);
2255 
2256 	return 0;
2257 }
2258 
/*
 * Enable HDCP on @connector for the given pipe config, preferring HDCP2.2
 * over HDCP1.4 when the setup supports it.
 *
 * @content_type: requested DRM HDCP content type; Type1 can only be served
 * by HDCP2.2, so no HDCP1.4 fallback is attempted for it. On success the
 * periodic link-check work is scheduled (with the period matching the
 * protocol that succeeded) and the uapi property moves to ENABLED.
 *
 * Returns 0 on success; -ENOENT if no shim is registered, -ENODEV if the
 * encoder is not initialized, or the enable path's error otherwise.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      const struct intel_crtc_state *pipe_config, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
			connector->base.name, connector->base.base.id);
		return -ENODEV;
	}

	/* Lock order: connector's hdcp mutex first, then the port's. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/*
	 * For MST, port-level auth runs on the master transcoder while the
	 * stream itself sits on this pipe's transcoder.
	 */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	/* Gen12+ ME FW identifies the port through the transcoder index. */
	if (INTEL_GEN(dev_priv) >= 12)
		dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2324 
2325 int intel_hdcp_disable(struct intel_connector *connector)
2326 {
2327 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2328 	struct intel_hdcp *hdcp = &connector->hdcp;
2329 	int ret = 0;
2330 
2331 	if (!hdcp->shim)
2332 		return -ENOENT;
2333 
2334 	mutex_lock(&hdcp->mutex);
2335 	mutex_lock(&dig_port->hdcp_mutex);
2336 
2337 	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2338 		goto out;
2339 
2340 	intel_hdcp_update_value(connector,
2341 				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2342 	if (hdcp->hdcp2_encrypted)
2343 		ret = _intel_hdcp2_disable(connector);
2344 	else if (hdcp->hdcp_encrypted)
2345 		ret = _intel_hdcp_disable(connector);
2346 
2347 out:
2348 	mutex_unlock(&dig_port->hdcp_mutex);
2349 	mutex_unlock(&hdcp->mutex);
2350 	cancel_delayed_work_sync(&hdcp->check_work);
2351 	return ret;
2352 }
2353 
/*
 * Atomic-commit hook reconciling the connector's content protection uapi
 * state with the actual HDCP state on a pipe update.
 *
 * Three transitions are handled:
 *  - UNDESIRED (or a content-type change): HDCP is disabled;
 *  - content-type change: additionally the property is reset to DESIRED
 *    (prop_work notifies userspace), then HDCP is re-enabled with the new
 *    type;
 *  - DESIRED: HDCP is enabled unless it is already ENABLED, in which case
 *    only prop_work is scheduled to sync the property back to userspace.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* prop_work drops this reference once it has run. */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			schedule_work(&hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state,
				  (u8)conn_state->hdcp_content_type);
}
2415 
2416 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2417 {
2418 	mutex_lock(&dev_priv->hdcp_comp_mutex);
2419 	if (!dev_priv->hdcp_comp_added) {
2420 		mutex_unlock(&dev_priv->hdcp_comp_mutex);
2421 		return;
2422 	}
2423 
2424 	dev_priv->hdcp_comp_added = false;
2425 	mutex_unlock(&dev_priv->hdcp_comp_mutex);
2426 
2427 	component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2428 }
2429 
/*
 * Final teardown of @connector's HDCP state on connector destroy: flush the
 * link-check worker and clear the shim so no further HDCP operations can
 * run. Must only be called once the connector has been unregistered.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2465 
2466 void intel_hdcp_atomic_check(struct drm_connector *connector,
2467 			     struct drm_connector_state *old_state,
2468 			     struct drm_connector_state *new_state)
2469 {
2470 	u64 old_cp = old_state->content_protection;
2471 	u64 new_cp = new_state->content_protection;
2472 	struct drm_crtc_state *crtc_state;
2473 
2474 	if (!new_state->crtc) {
2475 		/*
2476 		 * If the connector is being disabled with CP enabled, mark it
2477 		 * desired so it's re-enabled when the connector is brought back
2478 		 */
2479 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2480 			new_state->content_protection =
2481 				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2482 		return;
2483 	}
2484 
2485 	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2486 						   new_state->crtc);
2487 	/*
2488 	 * Fix the HDCP uapi content protection state in case of modeset.
2489 	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
2490 	 * need to be sent if there is transition from ENABLED->DESIRED.
2491 	 */
2492 	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2493 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2494 	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2495 		new_state->content_protection =
2496 			DRM_MODE_CONTENT_PROTECTION_DESIRED;
2497 
2498 	/*
2499 	 * Nothing to do if the state didn't change, or HDCP was activated since
2500 	 * the last commit. And also no change in hdcp content type.
2501 	 */
2502 	if (old_cp == new_cp ||
2503 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2504 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2505 		if (old_state->hdcp_content_type ==
2506 				new_state->hdcp_content_type)
2507 			return;
2508 	}
2509 
2510 	crtc_state->mode_changed = true;
2511 }
2512 
2513 /* Handles the CP_IRQ raised from the DP HDCP sink */
2514 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2515 {
2516 	struct intel_hdcp *hdcp = &connector->hdcp;
2517 
2518 	if (!hdcp->shim)
2519 		return;
2520 
2521 	atomic_inc(&connector->hdcp.cp_irq_count);
2522 	wake_up_all(&connector->hdcp.cp_irq_queue);
2523 
2524 	schedule_delayed_work(&hdcp->check_work, 0);
2525 }
2526