1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10 
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14 
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/i915_component.h>
17 
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29 
30 #define KEY_LOAD_TRIES	5
31 #define HDCP2_LC_RETRY_CNT			3
32 
33 static int intel_conn_to_vcpi(struct intel_connector *connector)
34 {
35 	struct drm_dp_mst_topology_mgr *mgr;
36 	struct drm_dp_mst_atomic_payload *payload;
37 	struct drm_dp_mst_topology_state *mst_state;
38 	int vcpi = 0;
39 
40 	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
41 	if (!connector->port)
42 		return 0;
43 	mgr = connector->port->mgr;
44 
45 	drm_modeset_lock(&mgr->base.lock, NULL);
46 	mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
47 	payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
48 	if (drm_WARN_ON(mgr->dev, !payload))
49 		goto out;
50 
51 	vcpi = payload->vcpi;
52 	if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
53 		vcpi = 0;
54 		goto out;
55 	}
56 out:
57 	drm_modeset_unlock(&mgr->base.lock);
58 	return vcpi;
59 }
60 
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in DP MST topology because security f/w doesn't
 * have any provision to mark content_type for each stream separately, it marks
 * all available streams with the content_type provided at the time of port
 * authentication. This may prohibit the userspace from using type1 content on
 * an HDCP 2.2 capable sink because other sinks in the DP MST topology are not
 * capable of HDCP 2.2. Though it is not compulsory, security fw should change
 * its policy to mark different content_types for different streams.
 */
static int
intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_digital_port *conn_dig_port;
	struct intel_connector *connector;
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	bool enforce_type0 = false;
	int k;

	/* Start with an empty stream list. */
	data->k = 0;

	/* Port already authenticated: stream data was already populated. */
	if (dig_port->hdcp_auth_status)
		return 0;

	/* Collect the VCPI of every connected MST stream on this port. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.status == connector_status_disconnected)
			continue;

		if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
			continue;

		conn_dig_port = intel_attached_dig_port(connector);
		if (conn_dig_port != dig_port)
			continue;

		/*
		 * A single type0-only sink in the topology forces type0 for
		 * every stream on the port (see comment above the function).
		 */
		if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
			enforce_type0 = true;

		data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
		data->k++;

		/* if there is only one active stream */
		if (dig_port->dp.active_mst_links <= 1)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* streams[] holds at most one entry per pipe; zero streams is a bug */
	if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
		return -EINVAL;

	/*
	 * Apply common protection level across all streams in DP MST Topology.
	 * Use highest supported content type for all streams in DP MST Topology.
	 */
	for (k = 0; k < data->k; k++)
		data->streams[k].stream_type =
			enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;

	return 0;
}
124 
125 static int intel_hdcp_prepare_streams(struct intel_connector *connector)
126 {
127 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
128 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
129 	struct intel_hdcp *hdcp = &connector->hdcp;
130 	int ret;
131 
132 	if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
133 		data->k = 1;
134 		data->streams[0].stream_type = hdcp->content_type;
135 	} else {
136 		ret = intel_hdcp_required_content_stream(dig_port);
137 		if (ret)
138 			return ret;
139 	}
140 
141 	return 0;
142 }
143 
144 static
145 bool intel_hdcp_is_ksv_valid(u8 *ksv)
146 {
147 	int i, ones = 0;
148 	/* KSV has 20 1's and 20 0's */
149 	for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
150 		ones += hweight8(ksv[i]);
151 	if (ones != 20)
152 		return false;
153 
154 	return true;
155 }
156 
157 static
158 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
159 			       const struct intel_hdcp_shim *shim, u8 *bksv)
160 {
161 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
162 	int ret, i, tries = 2;
163 
164 	/* HDCP spec states that we must retry the bksv if it is invalid */
165 	for (i = 0; i < tries; i++) {
166 		ret = shim->read_bksv(dig_port, bksv);
167 		if (ret)
168 			return ret;
169 		if (intel_hdcp_is_ksv_valid(bksv))
170 			break;
171 	}
172 	if (i == tries) {
173 		drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
174 		return -ENODEV;
175 	}
176 
177 	return 0;
178 }
179 
180 /* Is HDCP1.4 capable on Platform and Sink */
181 bool intel_hdcp_capable(struct intel_connector *connector)
182 {
183 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
184 	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
185 	bool capable = false;
186 	u8 bksv[5];
187 
188 	if (!shim)
189 		return capable;
190 
191 	if (shim->hdcp_capable) {
192 		shim->hdcp_capable(dig_port, &capable);
193 	} else {
194 		if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
195 			capable = true;
196 	}
197 
198 	return capable;
199 }
200 
201 /* Is HDCP2.2 capable on Platform and Sink */
202 bool intel_hdcp2_capable(struct intel_connector *connector)
203 {
204 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
205 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
206 	struct intel_hdcp *hdcp = &connector->hdcp;
207 	struct intel_gt *gt = dev_priv->media_gt;
208 	struct intel_gsc_uc *gsc = &gt->uc.gsc;
209 	bool capable = false;
210 
211 	/* I915 support for HDCP2.2 */
212 	if (!hdcp->hdcp2_supported)
213 		return false;
214 
215 	/* If MTL+ make sure gsc is loaded and proxy is setup */
216 	if (intel_hdcp_gsc_cs_required(dev_priv))
217 		if (!intel_uc_fw_is_running(&gsc->fw))
218 			return false;
219 
220 	/* MEI/GSC interface is solid depending on which is used */
221 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
222 	if (!dev_priv->display.hdcp.comp_added ||  !dev_priv->display.hdcp.master) {
223 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
224 		return false;
225 	}
226 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
227 
228 	/* Sink's capability for HDCP2.2 */
229 	hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
230 
231 	return capable;
232 }
233 
234 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
235 			      enum transcoder cpu_transcoder, enum port port)
236 {
237 	return intel_de_read(dev_priv,
238 	                     HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
239 	       HDCP_STATUS_ENC;
240 }
241 
242 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
243 			       enum transcoder cpu_transcoder, enum port port)
244 {
245 	return intel_de_read(dev_priv,
246 	                     HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
247 	       LINK_ENCRYPTION_STATUS;
248 }
249 
/*
 * Wait for the repeater's KSV list to become ready.
 *
 * Polls shim->read_ksv_ready() for up to 5s (the spec's upper bound),
 * bailing out early on a read error or once the sink reports ready.
 * Returns 0 on success, the shim's read error, or -ETIMEDOUT.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
270 
271 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
272 {
273 	enum i915_power_well_id id;
274 	intel_wakeref_t wakeref;
275 	bool enabled = false;
276 
277 	/*
278 	 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
279 	 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
280 	 */
281 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
282 		id = HSW_DISP_PW_GLOBAL;
283 	else
284 		id = SKL_DISP_PW_1;
285 
286 	/* PG1 (power well #1) needs to be enabled */
287 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
288 		enabled = intel_display_power_well_is_enabled(dev_priv, id);
289 
290 	/*
291 	 * Another req for hdcp key loadability is enabled state of pll for
292 	 * cdclk. Without active crtc we wont land here. So we are assuming that
293 	 * cdclk is already on.
294 	 */
295 
296 	return enabled;
297 }
298 
/*
 * Clear any loaded HDCP keys and reset the key status bits so a subsequent
 * load attempt starts from a clean state.
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	/*
	 * Write all status/fuse bits back to HDCP_KEY_STATUS — presumably
	 * write-to-clear semantics; confirm against bspec.
	 */
	intel_de_write(dev_priv, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
305 
/*
 * Load the HDCP1.4 keys into display HW.
 *
 * Returns 0 if the keys are (or already were) loaded successfully, -ENXIO
 * when the HW reports a failed load (or when keys are absent on platforms
 * where SW cannot load them), or a pcode/wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if a previous load already succeeded. */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only display
	 * version 9 platforms (minus BXT) differ in the key load trigger
	 * process from other platforms. These platforms use the GT Driver
	 * Mailbox interface.
	 */
	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
		ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
357 
358 /* Returns updated SHA-1 index */
359 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
360 {
361 	intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
362 	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
363 		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
364 		return -ETIMEDOUT;
365 	}
366 	return 0;
367 }
368 
/*
 * Return the REP_PRESENT | SHA1_M0 selection bits for HDCP_REP_CTL,
 * keyed by transcoder on display ver >= 12 and by DDI port on older
 * platforms.
 *
 * NOTE(review): the return type is u32, so the -EINVAL on an unknown
 * transcoder/port comes back as a large positive value; callers are
 * expected to pass only transcoders/ports valid for the platform.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	if (DISPLAY_VER(dev_priv) >= 12) {
		/* TGL+: repeater control bits are per-transcoder. */
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	/* Pre-TGL: repeater control bits are per-DDI. */
	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
410 
/*
 * Validate the repeater's V' against the HW-computed SHA-1 over the KSV
 * list, BINFO/BSTATUS and M0 (HDCP 1.4 second-stage authentication).
 *
 * The V' parts read from the sink are loaded into HDCP_SHA_V_PRIME, then
 * the text stream (all downstream KSVs || BSTATUS, with M0 inserted by HW
 * via HDCP_REP_CTL text-size selectors) is fed 32 bits at a time through
 * HDCP_SHA_TEXT, padded to the SHA-1 block size, terminated with the bit
 * length, and finally compared by HW against V'.
 *
 * Returns 0 if V' matches, -ETIMEDOUT on a HW handshake timeout, -ENXIO on
 * a SHA-1 mismatch, -EINVAL on an impossible leftover count, or a shim read
 * error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
653 
/*
 * Implements Part 2 of the HDCP authorization procedure (repeater
 * authentication): wait for the KSV FIFO, validate topology limits, check
 * the downstream KSVs against the revocation list, and verify V'.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies exceeding the spec's device/cascade limits. */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	/* One 5-byte KSV per downstream device; freed at err below. */
	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
737 
/*
 * Implements Part 1 of the HDCP authorization procedure: generate and
 * exchange An/Aksv, validate Bksv against the revocation list, verify
 * R0/R0', enable encryption (and per-stream encryption where the shim
 * supports it), then hand off to Part 2 if a repeater is present.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions view the same bytes as HW registers (u32) and shim buffers (u8). */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait below is timed from this point (Aksv written). */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/* DP MST Auth Part 1 Step 2.a and Step 2.b */
	if (shim->stream_encryption) {
		ret = shim->stream_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
910 
/*
 * Disable HDCP 1.4 on @connector: turn off per-stream encryption (if the
 * shim supports it), then port encryption, clear the repeater control bits
 * and drop HDCP signalling. Returns 0 on success or a negative error code.
 */
static int _intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	u32 repeater_ctl;
	int ret;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
		    connector->base.name, connector->base.base.id);

	if (hdcp->shim->stream_encryption) {
		ret = hdcp->shim->stream_encryption(connector, false);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));
		/*
		 * If there are other connectors on this port using HDCP,
		 * don't disable it until it disabled HDCP encryption for
		 * all connectors in MST topology.
		 */
		if (dig_port->num_hdcp_streams > 0)
			return 0;
	}

	hdcp->hdcp_encrypted = false;
	/* Clear HDCP_CONF and wait for all status bits to drop. */
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
	if (intel_de_wait_for_clear(dev_priv,
				    HDCP_STATUS(dev_priv, cpu_transcoder, port),
				    ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm,
			"Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;
	}

	repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
						   port);
	intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);

	ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
		return ret;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
	return 0;
}
965 
/*
 * Enable HDCP 1.4 on @connector: load the HDCP keys (retrying with a key
 * clear between attempts), then run the authentication flow, re-trying as
 * the spec allows after failures. Returns 0 on success or the last error.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int i, ret, tries = 3;

	drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
		    connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
		return -ENXIO;
	}

	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		/* Reset key state before the next load attempt. */
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
			ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(connector);
		if (!ret) {
			hdcp->hdcp_encrypted = true;
			return 0;
		}

		drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
1010 
1011 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1012 {
1013 	return container_of(hdcp, struct intel_connector, hdcp);
1014 }
1015 
/*
 * Update hdcp->value while keeping dig_port->num_hdcp_streams (count of
 * streams on this port with HDCP enabled) in sync, and optionally schedule
 * the worker that pushes the new value to the connector's content
 * protection property.
 *
 * Caller must hold hdcp->mutex; dig_port->hdcp_mutex is additionally
 * required whenever the value actually changes.
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	/* No transition: nothing to account for, nothing to signal. */
	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Account for transitions into/out of the ENABLED state. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference is dropped by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
1043 
/*
 * Implements Part 3 of the HDCP authorization procedure: periodic link
 * integrity checking. Verifies the hardware still reports encryption in
 * use and asks the shim to check the link; on failure, tears HDCP down and
 * reauthenticates, demoting the property to DESIRED if that fails too.
 *
 * Returns 0 when the link is healthy (or successfully recovered),
 * negative errno otherwise. Takes hdcp->mutex then dig_port->hdcp_mutex,
 * in that order.
 */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* Hardware says encryption stopped behind our back: demote to DESIRED. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Shim reports link OK: (re)assert ENABLED and we are done. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link check failed: full teardown followed by reauthentication. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1114 
/*
 * Deferred worker that propagates hdcp->value to the connector's content
 * protection property. Scheduled by intel_hdcp_update_value(), which takes
 * a connector reference on our behalf; we drop it here.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	/* Property updates require connection_mutex; hdcp->mutex nests inside. */
	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Pairs with the drm_connector_get() in intel_hdcp_update_value(). */
	drm_connector_put(&connector->base);
}
1139 
1140 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1141 {
1142 	return RUNTIME_INFO(dev_priv)->has_hdcp &&
1143 		(DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1144 }
1145 
1146 static int
1147 hdcp2_prepare_ake_init(struct intel_connector *connector,
1148 		       struct hdcp2_ake_init *ake_data)
1149 {
1150 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1151 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1152 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1153 	struct i915_hdcp_master *arbiter;
1154 	int ret;
1155 
1156 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1157 	arbiter = dev_priv->display.hdcp.master;
1158 
1159 	if (!arbiter || !arbiter->ops) {
1160 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1161 		return -EINVAL;
1162 	}
1163 
1164 	ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1165 	if (ret)
1166 		drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1167 			    ret);
1168 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1169 
1170 	return ret;
1171 }
1172 
1173 static int
1174 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1175 				struct hdcp2_ake_send_cert *rx_cert,
1176 				bool *paired,
1177 				struct hdcp2_ake_no_stored_km *ek_pub_km,
1178 				size_t *msg_sz)
1179 {
1180 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1181 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1182 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1183 	struct i915_hdcp_master *arbiter;
1184 	int ret;
1185 
1186 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1187 	arbiter = dev_priv->display.hdcp.master;
1188 
1189 	if (!arbiter || !arbiter->ops) {
1190 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1191 		return -EINVAL;
1192 	}
1193 
1194 	ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1195 							 rx_cert, paired,
1196 							 ek_pub_km, msg_sz);
1197 	if (ret < 0)
1198 		drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1199 			    ret);
1200 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1201 
1202 	return ret;
1203 }
1204 
1205 static int hdcp2_verify_hprime(struct intel_connector *connector,
1206 			       struct hdcp2_ake_send_hprime *rx_hprime)
1207 {
1208 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1209 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1210 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1211 	struct i915_hdcp_master *arbiter;
1212 	int ret;
1213 
1214 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1215 	arbiter = dev_priv->display.hdcp.master;
1216 
1217 	if (!arbiter || !arbiter->ops) {
1218 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1219 		return -EINVAL;
1220 	}
1221 
1222 	ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1223 	if (ret < 0)
1224 		drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1225 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1226 
1227 	return ret;
1228 }
1229 
1230 static int
1231 hdcp2_store_pairing_info(struct intel_connector *connector,
1232 			 struct hdcp2_ake_send_pairing_info *pairing_info)
1233 {
1234 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1235 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1236 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1237 	struct i915_hdcp_master *arbiter;
1238 	int ret;
1239 
1240 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1241 	arbiter = dev_priv->display.hdcp.master;
1242 
1243 	if (!arbiter || !arbiter->ops) {
1244 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1245 		return -EINVAL;
1246 	}
1247 
1248 	ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1249 	if (ret < 0)
1250 		drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1251 			    ret);
1252 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1253 
1254 	return ret;
1255 }
1256 
1257 static int
1258 hdcp2_prepare_lc_init(struct intel_connector *connector,
1259 		      struct hdcp2_lc_init *lc_init)
1260 {
1261 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1262 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1263 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1264 	struct i915_hdcp_master *arbiter;
1265 	int ret;
1266 
1267 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1268 	arbiter = dev_priv->display.hdcp.master;
1269 
1270 	if (!arbiter || !arbiter->ops) {
1271 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1272 		return -EINVAL;
1273 	}
1274 
1275 	ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1276 	if (ret < 0)
1277 		drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1278 			    ret);
1279 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1280 
1281 	return ret;
1282 }
1283 
1284 static int
1285 hdcp2_verify_lprime(struct intel_connector *connector,
1286 		    struct hdcp2_lc_send_lprime *rx_lprime)
1287 {
1288 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1289 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1290 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1291 	struct i915_hdcp_master *arbiter;
1292 	int ret;
1293 
1294 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1295 	arbiter = dev_priv->display.hdcp.master;
1296 
1297 	if (!arbiter || !arbiter->ops) {
1298 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1299 		return -EINVAL;
1300 	}
1301 
1302 	ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1303 	if (ret < 0)
1304 		drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1305 			    ret);
1306 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1307 
1308 	return ret;
1309 }
1310 
1311 static int hdcp2_prepare_skey(struct intel_connector *connector,
1312 			      struct hdcp2_ske_send_eks *ske_data)
1313 {
1314 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1315 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1316 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1317 	struct i915_hdcp_master *arbiter;
1318 	int ret;
1319 
1320 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1321 	arbiter = dev_priv->display.hdcp.master;
1322 
1323 	if (!arbiter || !arbiter->ops) {
1324 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1325 		return -EINVAL;
1326 	}
1327 
1328 	ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1329 	if (ret < 0)
1330 		drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1331 			    ret);
1332 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1333 
1334 	return ret;
1335 }
1336 
1337 static int
1338 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1339 				      struct hdcp2_rep_send_receiverid_list
1340 								*rep_topology,
1341 				      struct hdcp2_rep_send_ack *rep_send_ack)
1342 {
1343 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1344 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1345 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1346 	struct i915_hdcp_master *arbiter;
1347 	int ret;
1348 
1349 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1350 	arbiter = dev_priv->display.hdcp.master;
1351 
1352 	if (!arbiter || !arbiter->ops) {
1353 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1354 		return -EINVAL;
1355 	}
1356 
1357 	ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1358 							    data,
1359 							    rep_topology,
1360 							    rep_send_ack);
1361 	if (ret < 0)
1362 		drm_dbg_kms(&dev_priv->drm,
1363 			    "Verify rep topology failed. %d\n", ret);
1364 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1365 
1366 	return ret;
1367 }
1368 
1369 static int
1370 hdcp2_verify_mprime(struct intel_connector *connector,
1371 		    struct hdcp2_rep_stream_ready *stream_ready)
1372 {
1373 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1374 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1375 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1376 	struct i915_hdcp_master *arbiter;
1377 	int ret;
1378 
1379 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1380 	arbiter = dev_priv->display.hdcp.master;
1381 
1382 	if (!arbiter || !arbiter->ops) {
1383 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1384 		return -EINVAL;
1385 	}
1386 
1387 	ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1388 	if (ret < 0)
1389 		drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1390 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1391 
1392 	return ret;
1393 }
1394 
1395 static int hdcp2_authenticate_port(struct intel_connector *connector)
1396 {
1397 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1398 	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1399 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1400 	struct i915_hdcp_master *arbiter;
1401 	int ret;
1402 
1403 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1404 	arbiter = dev_priv->display.hdcp.master;
1405 
1406 	if (!arbiter || !arbiter->ops) {
1407 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1408 		return -EINVAL;
1409 	}
1410 
1411 	ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1412 	if (ret < 0)
1413 		drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1414 			    ret);
1415 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1416 
1417 	return ret;
1418 }
1419 
1420 static int hdcp2_close_session(struct intel_connector *connector)
1421 {
1422 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1423 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1424 	struct i915_hdcp_master *arbiter;
1425 	int ret;
1426 
1427 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1428 	arbiter = dev_priv->display.hdcp.master;
1429 
1430 	if (!arbiter || !arbiter->ops) {
1431 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1432 		return -EINVAL;
1433 	}
1434 
1435 	ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1436 					     &dig_port->hdcp_port_data);
1437 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1438 
1439 	return ret;
1440 }
1441 
/* Deauthenticating the port is just closing the firmware session. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	int ret = hdcp2_close_session(connector);

	return ret;
}
1446 
/*
 * Authentication flow starts from here.
 *
 * Runs the HDCP 2.2 AKE (Authentication and Key Exchange) phase:
 * AKE_Init -> AKE_Send_Cert -> km message -> AKE_Send_HPrime, plus the
 * pairing-info exchange when the receiver was not previously paired.
 * Populates hdcp->is_repeater and hdcp->is_paired along the way.
 *
 * Returns 0 on success, negative errno on any protocol or transport
 * failure.
 */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* All AKE messages are mutually exclusive in time: share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	/* Receiver must advertise HDCP 2.2 in its rx_caps. */
	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Refuse receivers on the SRM revocation list. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1536 
1537 static int hdcp2_locality_check(struct intel_connector *connector)
1538 {
1539 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1540 	struct intel_hdcp *hdcp = &connector->hdcp;
1541 	union {
1542 		struct hdcp2_lc_init lc_init;
1543 		struct hdcp2_lc_send_lprime send_lprime;
1544 	} msgs;
1545 	const struct intel_hdcp_shim *shim = hdcp->shim;
1546 	int tries = HDCP2_LC_RETRY_CNT, ret, i;
1547 
1548 	for (i = 0; i < tries; i++) {
1549 		ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1550 		if (ret < 0)
1551 			continue;
1552 
1553 		ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1554 				      sizeof(msgs.lc_init));
1555 		if (ret < 0)
1556 			continue;
1557 
1558 		ret = shim->read_2_2_msg(dig_port,
1559 					 HDCP_2_2_LC_SEND_LPRIME,
1560 					 &msgs.send_lprime,
1561 					 sizeof(msgs.send_lprime));
1562 		if (ret < 0)
1563 			continue;
1564 
1565 		ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1566 		if (!ret)
1567 			break;
1568 	}
1569 
1570 	return ret;
1571 }
1572 
1573 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1574 {
1575 	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1576 	struct intel_hdcp *hdcp = &connector->hdcp;
1577 	struct hdcp2_ske_send_eks send_eks;
1578 	int ret;
1579 
1580 	ret = hdcp2_prepare_skey(connector, &send_eks);
1581 	if (ret < 0)
1582 		return ret;
1583 
1584 	ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1585 					sizeof(send_eks));
1586 	if (ret < 0)
1587 		return ret;
1588 
1589 	return 0;
1590 }
1591 
/*
 * Single attempt at the RepeaterAuth_Stream_Manage / Stream_Ready
 * exchange: send the per-stream id/type list tagged with the current
 * seq_num_m, read back Stream_Ready and have the firmware verify M'.
 *
 * seq_num_m is advanced on every attempt, success or failure, so each
 * Stream_Manage carries a fresh sequence number (see the out: label).
 *
 * Returns 0 on success, -ERANGE on seq_num_m exhaustion, other negative
 * errno on transport/verification failure.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret, streams_size_delta, i;

	/* seq_num_m must not wrap; caller restarts auth in that case. */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	msgs.stream_manage.k = cpu_to_be16(data->k);

	/* Copy the k active stream id/type pairs into the wire message. */
	for (i = 0; i < data->k; i++) {
		msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
		msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
	}

	/* Only transmit the populated part of the streams array. */
	streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
				sizeof(struct hdcp2_streamid_type);
	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage) - streams_size_delta);
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	/* Firmware needs the seq_num_m this Stream_Ready corresponds to. */
	data->seq_num_m = hdcp->seq_num_m;

	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	hdcp->seq_num_m++;

	return ret;
}
1641 
/*
 * Repeater authentication: read RepeaterAuth_Send_ReceiverID_List,
 * sanity-check the topology (size limits, seq_num_v monotonicity,
 * revoked IDs), have the firmware verify it and build the ack, then send
 * RepeaterAuth_Send_Ack back.
 *
 * Also records whether the downstream topology is Type 1 capable in
 * dig_port->hdcp_mst_type1_capable.
 *
 * Returns 0 on success, negative errno on failure (callers typically
 * restart authentication).
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	/* Spec-defined caps on cascade depth and device count. */
	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/*
	 * MST topology is not Type 1 capable if it contains a downstream
	 * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
	 */
	dig_port->hdcp_mst_type1_capable =
		!HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
		!HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	/* First list after auth start must carry seq_num_v == 0. */
	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1717 
/*
 * Run the full HDCP 2.2 sink authentication sequence in spec order:
 * AKE, locality check, session key exchange, optional stream-type
 * configuration via the shim, and repeater topology authentication when
 * the sink is a repeater.
 *
 * Returns 0 on success, negative errno on the first failing step.
 */
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	ret = hdcp2_authentication_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_locality_check(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "Locality Check failed. Err : %d\n", ret);
		return ret;
	}

	ret = hdcp2_session_key_exchange(connector);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
		return ret;
	}

	/* Optional shim hook to program the negotiated stream type. */
	if (shim->config_stream_type) {
		ret = shim->config_stream_type(dig_port,
					       hdcp->is_repeater,
					       hdcp->content_type);
		if (ret < 0)
			return ret;
	}

	/* Repeaters additionally require topology authentication. */
	if (hdcp->is_repeater) {
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Repeater Auth Failed. Err: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
1764 
/*
 * Enable HDCP 2.2 encryption for this connector's stream, after
 * confirming the hardware reports link-level encryption is already up.
 * If the link is not encrypted, deauthenticate the port and reset the
 * per-port auth state so the caller can restart from scratch.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	enum port port = dig_port->base.port;
	int ret = 0;

	/* Stream encryption is meaningless without link encryption. */
	if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
			    LINK_ENCRYPTION_STATUS)) {
		drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
			connector->base.name, connector->base.base.id);
		ret = -EPERM;
		goto link_recover;
	}

	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, true);
		if (ret) {
			drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
			    transcoder_name(hdcp->stream_transcoder));
	}

	return 0;

link_recover:
	/* Best effort: tear down the firmware session before reporting failure. */
	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");

	dig_port->hdcp_auth_status = false;
	/* Forget the prepared stream list; it must be rebuilt on re-auth. */
	data->k = 0;

	return ret;
}
1805 
/*
 * Turn on HDCP 2.2 link encryption: enable shim signalling (if the shim
 * provides the hook), request encryption once the hardware reports the
 * link authenticated, and wait for the encryption status bit.
 *
 * Marks dig_port->hdcp_auth_status regardless of the wait result; the
 * caller inspects the return value. Returns 0 on success, negative errno
 * otherwise.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be on when we get here. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS)
		/* Link is Authenticated. Now set for Encryption */
		intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
			     0, CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	dig_port->hdcp_auth_status = true;

	return ret;
}
1844 
/*
 * Turn off HDCP 2.2 link encryption: drop the encryption request bit,
 * wait for the status bit to clear, then disable shim signalling.
 *
 * NOTE(review): a wait timeout is only logged here; if the shim's
 * toggle_signalling then succeeds, its 0 overwrites the -ETIMEDOUT and
 * the function returns success — looks intentional (best-effort
 * teardown), but worth confirming.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption should be on when we are asked to disable it. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		     CTL_LINK_ENCRYPTION_REQ, 0);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1881 
1882 static int
1883 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1884 {
1885 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1886 	int i, tries = 3, ret;
1887 
1888 	if (!connector->hdcp.is_repeater)
1889 		return 0;
1890 
1891 	for (i = 0; i < tries; i++) {
1892 		ret = _hdcp2_propagate_stream_management_info(connector);
1893 		if (!ret)
1894 			break;
1895 
1896 		/* Lets restart the auth incase of seq_num_m roll over */
1897 		if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1898 			drm_dbg_kms(&i915->drm,
1899 				    "seq_num_m roll over.(%d)\n", ret);
1900 			break;
1901 		}
1902 
1903 		drm_dbg_kms(&i915->drm,
1904 			    "HDCP2 stream management %d of %d Failed.(%d)\n",
1905 			    i + 1, tries, ret);
1906 	}
1907 
1908 	return ret;
1909 }
1910 
/*
 * Top-level HDCP 2.2 bring-up: authenticate the sink (with up to three
 * retries, clearing the firmware session between attempts), enable link
 * encryption after the spec-mandated delay, and finally enable stream
 * encryption.
 *
 * When the port is already authenticated (dig_port->hdcp_auth_status,
 * e.g. another MST stream did the work), the auth loop is skipped and
 * only stream encryption is enabled.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	int ret = 0, i, tries = 3;

	for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = intel_hdcp_prepare_streams(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Prepare streams failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}

			ret = hdcp2_authenticate_port(connector);
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret && !dig_port->hdcp_auth_status) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	if (!ret)
		ret = hdcp2_enable_stream_encryption(connector);

	return ret;
}
1970 
1971 static int _intel_hdcp2_enable(struct intel_connector *connector)
1972 {
1973 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
1974 	struct intel_hdcp *hdcp = &connector->hdcp;
1975 	int ret;
1976 
1977 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1978 		    connector->base.name, connector->base.base.id,
1979 		    hdcp->content_type);
1980 
1981 	ret = hdcp2_authenticate_and_encrypt(connector);
1982 	if (ret) {
1983 		drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1984 			    hdcp->content_type, ret);
1985 		return ret;
1986 	}
1987 
1988 	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1989 		    connector->base.name, connector->base.base.id,
1990 		    hdcp->content_type);
1991 
1992 	hdcp->hdcp2_encrypted = true;
1993 	return 0;
1994 }
1995 
/*
 * Tear down HDCP2.2 on @connector. Stream encryption is disabled first;
 * port-level encryption and f/w deauthentication are skipped while other
 * streams on the port are still encrypted, unless this is a link-recovery
 * teardown (@hdcp2_link_recovery), which always takes the port down too.
 */
static int
_intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret;

	drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
		    connector->base.name, connector->base.base.id);

	/* Disable this connector's stream encryption before the port. */
	if (hdcp->shim->stream_2_2_encryption) {
		ret = hdcp->shim->stream_2_2_encryption(connector, false);
		if (ret) {
			drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
				connector->base.name, connector->base.base.id);
			return ret;
		}
		drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
			    transcoder_name(hdcp->stream_transcoder));

		/* Other streams still need the port; leave it encrypted. */
		if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
			return 0;
	}

	ret = hdcp2_disable_encryption(connector);

	if (hdcp2_deauthenticate_port(connector) < 0)
		drm_dbg_kms(&i915->drm, "Port deauth failed.\n");

	/* Reset the port-wide HDCP2.2 bookkeeping. */
	connector->hdcp.hdcp2_encrypted = false;
	dig_port->hdcp_auth_status = false;
	data->k = 0;

	return ret;
}
2033 
/*
 * Implements the Link Integrity Check for HDCP2.2. Called from the
 * periodic check worker; returns 0 when the link is (still or again)
 * protected, a negative error code otherwise.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Same lock order as the enable/disable paths. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Hardware says encryption dropped behind our back: tear down and
	 * flag DESIRED so a later commit can re-enable.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		_intel_hdcp2_disable(connector, true);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Ask the sink-specific shim about the downstream link state. */
	ret = hdcp->shim->check_2_2_link(dig_port, connector);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	/*
	 * Topology change: re-authenticate just the repeater topology; a
	 * full re-auth is only needed if that fails.
	 */
	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Link failed (or topology re-auth failed): full disable + enable. */
	ret = _intel_hdcp2_disable(connector, true);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2128 
2129 static void intel_hdcp_check_work(struct work_struct *work)
2130 {
2131 	struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2132 					       struct intel_hdcp,
2133 					       check_work);
2134 	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2135 
2136 	if (drm_connector_is_unregistered(&connector->base))
2137 		return;
2138 
2139 	if (!intel_hdcp2_check_link(connector))
2140 		schedule_delayed_work(&hdcp->check_work,
2141 				      DRM_HDCP2_CHECK_PERIOD_MS);
2142 	else if (!intel_hdcp_check_link(connector))
2143 		schedule_delayed_work(&hdcp->check_work,
2144 				      DRM_HDCP_CHECK_PERIOD_MS);
2145 }
2146 
2147 static int i915_hdcp_component_bind(struct device *i915_kdev,
2148 				    struct device *mei_kdev, void *data)
2149 {
2150 	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2151 
2152 	drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2153 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2154 	dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data;
2155 	dev_priv->display.hdcp.master->hdcp_dev = mei_kdev;
2156 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2157 
2158 	return 0;
2159 }
2160 
/* Component unbind callback: drop the cached HDCP f/w interface. */
static void i915_hdcp_component_unbind(struct device *i915_kdev,
				       struct device *mei_kdev, void *data)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);

	drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
	dev_priv->display.hdcp.master = NULL;
	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
}
2171 
/* Component ops tying i915 to the external HDCP f/w driver (MEI). */
static const struct component_ops i915_hdcp_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
2176 
2177 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2178 {
2179 	switch (port) {
2180 	case PORT_A:
2181 		return HDCP_DDI_A;
2182 	case PORT_B ... PORT_F:
2183 		return (enum hdcp_ddi)port;
2184 	default:
2185 		return HDCP_DDI_INVALID_PORT;
2186 	}
2187 }
2188 
2189 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2190 {
2191 	switch (cpu_transcoder) {
2192 	case TRANSCODER_A ... TRANSCODER_D:
2193 		return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2194 	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2195 		return HDCP_INVALID_TRANSCODER;
2196 	}
2197 }
2198 
/*
 * Fill in the f/w-facing hdcp_port_data of @dig_port for @connector and
 * allocate its per-stream table (sized for one stream per pipe, so MST
 * topologies fit). Returns 0 on success, -ENOMEM on allocation failure.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     struct intel_digital_port *dig_port,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct hdcp_port_data *data = &dig_port->hdcp_port_data;
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;

	if (DISPLAY_VER(dev_priv) < 12)
		data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->hdcp_ddi = HDCP_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here hdcp_transcoder
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Allocate the stream table once per port; reused across connectors. */
	if (!data->streams)
		data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}
	/* For SST */
	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}
2241 
2242 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2243 {
2244 	if (intel_hdcp_gsc_cs_required(dev_priv))
2245 		return true;
2246 
2247 	if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2248 		return false;
2249 
2250 	return (DISPLAY_VER(dev_priv) >= 10 ||
2251 		IS_KABYLAKE(dev_priv) ||
2252 		IS_COFFEELAKE(dev_priv) ||
2253 		IS_COMETLAKE(dev_priv));
2254 }
2255 
2256 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2257 {
2258 	int ret;
2259 
2260 	if (!is_hdcp2_supported(dev_priv))
2261 		return;
2262 
2263 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2264 	drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
2265 
2266 	dev_priv->display.hdcp.comp_added = true;
2267 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2268 	if (intel_hdcp_gsc_cs_required(dev_priv))
2269 		ret = intel_hdcp_gsc_init(dev_priv);
2270 	else
2271 		ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops,
2272 					  I915_COMPONENT_HDCP);
2273 
2274 	if (ret < 0) {
2275 		drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n",
2276 			    ret);
2277 		mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2278 		dev_priv->display.hdcp.comp_added = false;
2279 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2280 		return;
2281 	}
2282 }
2283 
2284 static void intel_hdcp2_init(struct intel_connector *connector,
2285 			     struct intel_digital_port *dig_port,
2286 			     const struct intel_hdcp_shim *shim)
2287 {
2288 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
2289 	struct intel_hdcp *hdcp = &connector->hdcp;
2290 	int ret;
2291 
2292 	ret = initialize_hdcp_port_data(connector, dig_port, shim);
2293 	if (ret) {
2294 		drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2295 		return;
2296 	}
2297 
2298 	hdcp->hdcp2_supported = true;
2299 }
2300 
2301 int intel_hdcp_init(struct intel_connector *connector,
2302 		    struct intel_digital_port *dig_port,
2303 		    const struct intel_hdcp_shim *shim)
2304 {
2305 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2306 	struct intel_hdcp *hdcp = &connector->hdcp;
2307 	int ret;
2308 
2309 	if (!shim)
2310 		return -EINVAL;
2311 
2312 	if (is_hdcp2_supported(dev_priv))
2313 		intel_hdcp2_init(connector, dig_port, shim);
2314 
2315 	ret =
2316 	drm_connector_attach_content_protection_property(&connector->base,
2317 							 hdcp->hdcp2_supported);
2318 	if (ret) {
2319 		hdcp->hdcp2_supported = false;
2320 		kfree(dig_port->hdcp_port_data.streams);
2321 		return ret;
2322 	}
2323 
2324 	hdcp->shim = shim;
2325 	mutex_init(&hdcp->mutex);
2326 	INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2327 	INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2328 	init_waitqueue_head(&hdcp->cp_irq_queue);
2329 
2330 	return 0;
2331 }
2332 
/*
 * Enable HDCP on @connector with the userspace-requested @content_type.
 * HDCP2.2 is tried first when the setup is capable of it; HDCP1.4 is
 * only attempted as a fallback when the content type is not Type1. On
 * success the CP property is moved to ENABLED and the periodic link
 * check worker is armed. Returns 0 on success or a negative error code.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      const struct intel_crtc_state *pipe_config, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	if (!connector->encoder) {
		drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
			connector->base.name, connector->base.base.id);
		return -ENODEV;
	}

	/* Same lock order as the check-link and disable paths. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;

	/*
	 * For MST, HDCP is driven on the master transcoder while the
	 * stream's own transcoder is tracked separately; for SST both
	 * roles collapse onto the single CPU transcoder.
	 */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
		hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
		hdcp->stream_transcoder = pipe_config->cpu_transcoder;
	} else {
		hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
		hdcp->stream_transcoder = INVALID_TRANSCODER;
	}

	if (DISPLAY_VER(dev_priv) >= 12)
		dig_port->hdcp_port_data.hdcp_transcoder =
			intel_get_hdcp_transcoder(hdcp->cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	/* Arm the link monitor and publish ENABLED to userspace. */
	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2399 
/*
 * Disable HDCP on @connector (2.2 or 1.4, whichever is currently
 * encrypting) and move the CP property to UNDESIRED. A no-op when it is
 * already UNDESIRED. Returns 0 on success or a negative error code.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector, false);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	/*
	 * Cancel outside the locks: the check worker takes hdcp->mutex
	 * itself, so cancelling synchronously under it could deadlock.
	 */
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2428 
/*
 * Atomic commit hook: bring the connector's HDCP state in line with the
 * new connector state. Disables HDCP when userspace set UNDESIRED or
 * changed the content type, and (re)enables it when the state is DESIRED
 * and HDCP is not already up.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	/* Type changed while CP is still requested (not UNDESIRED)? */
	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		/* prop_work holds this reference until it runs. */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
		/*
		 * If HDCP already ENABLED and CP property is DESIRED, schedule
		 * prop_work to update correct CP property to user space.
		 */
		if (!desired_and_not_enabled && !content_protection_type_changed) {
			drm_connector_get(&connector->base);
			schedule_work(&hdcp->prop_work);
		}
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state,
				  (u8)conn_state->hdcp_content_type);
}
2490 
2491 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2492 {
2493 	mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2494 	if (!dev_priv->display.hdcp.comp_added) {
2495 		mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2496 		return;
2497 	}
2498 
2499 	dev_priv->display.hdcp.comp_added = false;
2500 	mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2501 
2502 	if (intel_hdcp_gsc_cs_required(dev_priv))
2503 		intel_hdcp_gsc_fini(dev_priv);
2504 	else
2505 		component_del(dev_priv->drm.dev, &i915_hdcp_ops);
2506 }
2507 
2508 void intel_hdcp_cleanup(struct intel_connector *connector)
2509 {
2510 	struct intel_hdcp *hdcp = &connector->hdcp;
2511 
2512 	if (!hdcp->shim)
2513 		return;
2514 
2515 	/*
2516 	 * If the connector is registered, it's possible userspace could kick
2517 	 * off another HDCP enable, which would re-spawn the workers.
2518 	 */
2519 	drm_WARN_ON(connector->base.dev,
2520 		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2521 
2522 	/*
2523 	 * Now that the connector is not registered, check_work won't be run,
2524 	 * but cancel any outstanding instances of it
2525 	 */
2526 	cancel_delayed_work_sync(&hdcp->check_work);
2527 
2528 	/*
2529 	 * We don't cancel prop_work in the same way as check_work since it
2530 	 * requires connection_mutex which could be held while calling this
2531 	 * function. Instead, we rely on the connector references grabbed before
2532 	 * scheduling prop_work to ensure the connector is alive when prop_work
2533 	 * is run. So if we're in the destroy path (which is where this
2534 	 * function should be called), we're "guaranteed" that prop_work is not
2535 	 * active (tl;dr This Should Never Happen).
2536 	 */
2537 	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2538 
2539 	mutex_lock(&hdcp->mutex);
2540 	hdcp->shim = NULL;
2541 	mutex_unlock(&hdcp->mutex);
2542 }
2543 
2544 void intel_hdcp_atomic_check(struct drm_connector *connector,
2545 			     struct drm_connector_state *old_state,
2546 			     struct drm_connector_state *new_state)
2547 {
2548 	u64 old_cp = old_state->content_protection;
2549 	u64 new_cp = new_state->content_protection;
2550 	struct drm_crtc_state *crtc_state;
2551 
2552 	if (!new_state->crtc) {
2553 		/*
2554 		 * If the connector is being disabled with CP enabled, mark it
2555 		 * desired so it's re-enabled when the connector is brought back
2556 		 */
2557 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2558 			new_state->content_protection =
2559 				DRM_MODE_CONTENT_PROTECTION_DESIRED;
2560 		return;
2561 	}
2562 
2563 	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2564 						   new_state->crtc);
2565 	/*
2566 	 * Fix the HDCP uapi content protection state in case of modeset.
2567 	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
2568 	 * need to be sent if there is transition from ENABLED->DESIRED.
2569 	 */
2570 	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2571 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2572 	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2573 		new_state->content_protection =
2574 			DRM_MODE_CONTENT_PROTECTION_DESIRED;
2575 
2576 	/*
2577 	 * Nothing to do if the state didn't change, or HDCP was activated since
2578 	 * the last commit. And also no change in hdcp content type.
2579 	 */
2580 	if (old_cp == new_cp ||
2581 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2582 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2583 		if (old_state->hdcp_content_type ==
2584 				new_state->hdcp_content_type)
2585 			return;
2586 	}
2587 
2588 	crtc_state->mode_changed = true;
2589 }
2590 
2591 /* Handles the CP_IRQ raised from the DP HDCP sink */
2592 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2593 {
2594 	struct intel_hdcp *hdcp = &connector->hdcp;
2595 
2596 	if (!hdcp->shim)
2597 		return;
2598 
2599 	atomic_inc(&connector->hdcp.cp_irq_count);
2600 	wake_up_all(&connector->hdcp.cp_irq_queue);
2601 
2602 	schedule_delayed_work(&hdcp->check_work, 0);
2603 }
2604