1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence MHDP8546 DP bridge driver.
4  *
5  * Copyright (C) 2020 Cadence Design Systems, Inc.
6  *
7  * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8  *          Swapnil Jakhade <sjakhade@cadence.com>
9  *          Yuti Amonkar <yamonkar@cadence.com>
10  *          Tomi Valkeinen <tomi.valkeinen@ti.com>
11  *          Jyri Sarha <jsarha@ti.com>
12  *
13  * TODO:
14  *     - Implement optimized mailbox communication using mailbox interrupts
15  *     - Add support for power management
16  *     - Add support for features like audio, MST and fast link training
17  *     - Implement request_fw_cancel to handle HW_STATE
18  *     - Fix asynchronous loading of firmware implementation
19  *     - Add DRM helper function for cdns_mhdp_lower_link_rate
20  */
21 
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/media-bus-format.h>
30 #include <linux/module.h>
31 #include <linux/of.h>
32 #include <linux/of_device.h>
33 #include <linux/phy/phy.h>
34 #include <linux/phy/phy-dp.h>
35 #include <linux/platform_device.h>
36 #include <linux/slab.h>
37 #include <linux/wait.h>
38 
39 #include <drm/display/drm_dp_helper.h>
40 #include <drm/display/drm_hdcp_helper.h>
41 #include <drm/drm_atomic.h>
42 #include <drm/drm_atomic_helper.h>
43 #include <drm/drm_atomic_state_helper.h>
44 #include <drm/drm_bridge.h>
45 #include <drm/drm_connector.h>
46 #include <drm/drm_edid.h>
47 #include <drm/drm_modeset_helper_vtables.h>
48 #include <drm/drm_print.h>
49 #include <drm/drm_probe_helper.h>
50 
51 #include <asm/unaligned.h>
52 
53 #include "cdns-mhdp8546-core.h"
54 #include "cdns-mhdp8546-hdcp.h"
55 #include "cdns-mhdp8546-j721e.h"
56 
57 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
58 {
59 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
60 
61 	/* Enable SW event interrupts */
62 	if (mhdp->bridge_attached)
63 		writel(readl(mhdp->regs + CDNS_APB_INT_MASK) &
64 		       ~CDNS_APB_INT_MASK_SW_EVENT_INT,
65 		       mhdp->regs + CDNS_APB_INT_MASK);
66 }
67 
68 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
69 {
70 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
71 
72 	writel(readl(mhdp->regs + CDNS_APB_INT_MASK) |
73 	       CDNS_APB_INT_MASK_SW_EVENT_INT,
74 	       mhdp->regs + CDNS_APB_INT_MASK);
75 }
76 
/*
 * Pop one byte from the firmware mailbox RX FIFO.
 *
 * Polls until the mailbox EMPTY flag clears, then reads a single byte
 * from the RX data register. Caller must hold mbox_mutex.
 *
 * Returns the byte read (0-255) on success or a negative error code if
 * the poll times out.
 */
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
	int ret, empty;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
				 empty, !empty, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	/* Only the low byte of the RX data register carries payload */
	return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
}
91 
/*
 * Push one byte into the firmware mailbox TX FIFO.
 *
 * Polls until the mailbox FULL flag clears, then writes the byte to
 * the TX data register. Caller must hold mbox_mutex.
 *
 * Returns 0 on success or a negative error code if the poll times out.
 */
static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
{
	int ret, full;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);

	return 0;
}
108 
/*
 * Read and validate a 4-byte mailbox response header.
 *
 * Header layout: [0] opcode, [1] module id, [2..3] payload size (BE16).
 * If the header does not match the expected @module_id/@opcode/@req_size,
 * the stale payload is drained so the mailbox stays usable for the next
 * command, and -EINVAL is returned. Caller must hold mbox_mutex.
 */
static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
					 u8 module_id, u8 opcode,
					 u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	mbox_size = get_unaligned_be16(header + 2);

	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in mailbox is not what we want, we need to
		 * clear the mailbox by reading its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdns_mhdp_mailbox_read(mhdp) < 0)
				break;

		return -EINVAL;
	}

	return 0;
}
143 
144 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
145 				       u8 *buff, u16 buff_size)
146 {
147 	u32 i;
148 	int ret;
149 
150 	for (i = 0; i < buff_size; i++) {
151 		ret = cdns_mhdp_mailbox_read(mhdp);
152 		if (ret < 0)
153 			return ret;
154 
155 		buff[i] = ret;
156 	}
157 
158 	return 0;
159 }
160 
161 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
162 				  u8 opcode, u16 size, u8 *message)
163 {
164 	u8 header[4];
165 	int ret, i;
166 
167 	header[0] = opcode;
168 	header[1] = module_id;
169 	put_unaligned_be16(size, header + 2);
170 
171 	for (i = 0; i < sizeof(header); i++) {
172 		ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
173 		if (ret)
174 			return ret;
175 	}
176 
177 	for (i = 0; i < size; i++) {
178 		ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
179 		if (ret)
180 			return ret;
181 	}
182 
183 	return 0;
184 }
185 
/*
 * Read a 32-bit controller register through the firmware mailbox
 * (GENERAL_REGISTER_READ).
 *
 * On success *value holds the register contents; on any failure *value
 * is zeroed and a negative error code is returned.
 */
static
int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
{
	u8 msg[4], resp[8];
	int ret;

	/* Request payload: BE32 register address */
	put_unaligned_be32(addr, msg);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
				     GENERAL_REGISTER_READ,
				     sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
					    GENERAL_REGISTER_READ,
					    sizeof(resp));
	if (ret)
		goto out;

	/* Response: BE32 address echo followed by BE32 register value */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
	if (ret)
		goto out;

	/* Returned address value should be the same as requested */
	if (memcmp(msg, resp, sizeof(msg))) {
		ret = -EINVAL;
		goto out;
	}

	*value = get_unaligned_be32(resp + 4);

out:
	mutex_unlock(&mhdp->mbox_mutex);
	if (ret) {
		dev_err(mhdp->dev, "Failed to read register\n");
		*value = 0;
	}

	return ret;
}
229 
230 static
231 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
232 {
233 	u8 msg[6];
234 	int ret;
235 
236 	put_unaligned_be16(addr, msg);
237 	put_unaligned_be32(val, msg + 2);
238 
239 	mutex_lock(&mhdp->mbox_mutex);
240 
241 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
242 				     DPTX_WRITE_REGISTER, sizeof(msg), msg);
243 
244 	mutex_unlock(&mhdp->mbox_mutex);
245 
246 	return ret;
247 }
248 
249 static
250 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
251 			    u8 start_bit, u8 bits_no, u32 val)
252 {
253 	u8 field[8];
254 	int ret;
255 
256 	put_unaligned_be16(addr, field);
257 	field[2] = start_bit;
258 	field[3] = bits_no;
259 	put_unaligned_be32(val, field + 4);
260 
261 	mutex_lock(&mhdp->mbox_mutex);
262 
263 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
264 				     DPTX_WRITE_FIELD, sizeof(field), field);
265 
266 	mutex_unlock(&mhdp->mbox_mutex);
267 
268 	return ret;
269 }
270 
/*
 * Read @len bytes of sink DPCD starting at @addr via the firmware
 * (DPTX_READ_DPCD).
 *
 * Request: BE16 length + BE24 DPCD address. The response carries a
 * 5-byte header (which is read into reg and discarded) followed by the
 * DPCD payload, copied into @data.
 */
static
int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
			u32 addr, u8 *data, u16 len)
{
	u8 msg[5], reg[5];
	int ret;

	put_unaligned_be16(len, msg);
	put_unaligned_be24(addr, msg + 2);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_DPCD,
					    sizeof(reg) + len);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);

out:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}
305 
/*
 * Write a single sink DPCD byte at @addr via the firmware
 * (DPTX_WRITE_DPCD).
 *
 * Request: BE16 length (always 1) + BE24 address + the data byte. The
 * 5-byte response header echoes the address; a mismatch is reported as
 * -EINVAL.
 */
static
int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
{
	u8 msg[6], reg[5];
	int ret;

	put_unaligned_be16(1, msg);
	put_unaligned_be24(addr, msg + 2);
	msg[5] = value;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_WRITE_DPCD, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	/* The response must echo the address we asked to write */
	if (addr != get_unaligned_be24(reg + 2))
		ret = -EINVAL;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
	return ret;
}
342 
/*
 * Switch the firmware between FW_ACTIVE and FW_STANDBY
 * (GENERAL_MAIN_CONTROL).
 *
 * The request is hand-assembled (4-byte mailbox header plus a 1-byte
 * payload) and written byte-by-byte. A 5-byte response is read back
 * into msg; its contents are not inspected further here.
 */
static
int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
{
	u8 msg[5];
	int ret, i;

	/* Mailbox header: opcode, module id, BE16 payload size (= 1) */
	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < sizeof(msg); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
		if (ret)
			goto out;
	}

	/* read the firmware state */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
	if (ret)
		goto out;

	ret = 0;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		dev_err(mhdp->dev, "set firmware active failed\n");
	return ret;
}
377 
378 static
379 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
380 {
381 	u8 status;
382 	int ret;
383 
384 	mutex_lock(&mhdp->mbox_mutex);
385 
386 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
387 				     DPTX_HPD_STATE, 0, NULL);
388 	if (ret)
389 		goto err_get_hpd;
390 
391 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
392 					    DPTX_HPD_STATE,
393 					    sizeof(status));
394 	if (ret)
395 		goto err_get_hpd;
396 
397 	ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
398 	if (ret)
399 		goto err_get_hpd;
400 
401 	mutex_unlock(&mhdp->mbox_mutex);
402 
403 	dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
404 		status ? "" : "un");
405 
406 	return status;
407 
408 err_get_hpd:
409 	mutex_unlock(&mhdp->mbox_mutex);
410 
411 	return ret;
412 }
413 
/*
 * EDID block read callback for the DRM EDID helpers (DPTX_GET_EDID).
 *
 * Request: msg[0] = EDID segment (block / 2), msg[1] = block within the
 * segment. The 2-byte response header is expected to echo the length
 * and the segment; a mismatch (or any mailbox error) triggers another
 * attempt, up to 4 tries. On failure the error reported is whatever the
 * last attempt left in ret.
 */
static
int cdns_mhdp_get_edid_block(void *data, u8 *edid,
			     unsigned int block, size_t length)
{
	struct cdns_mhdp_device *mhdp = data;
	u8 msg[2], reg[2], i;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < 4; i++) {
		msg[0] = block / 2;
		msg[1] = block % 2;

		ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
					     DPTX_GET_EDID, sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
						    DPTX_GET_EDID,
						    sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
		if (ret)
			continue;

		/* reg[0] = returned length, reg[1] = returned segment */
		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
			block, ret);

	return ret;
}
459 
/*
 * Fetch and clear the pending HPD event flags from the firmware
 * (DPTX_READ_EVENT).
 *
 * Returns a DPTX_READ_EVENT_* bitmask (>= 0) on success or a negative
 * error code on mailbox failure.
 */
static
int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
{
	u8 event = 0;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_EVENT, 0, NULL);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_EVENT, sizeof(event));
	if (ret < 0)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		return ret;

	dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
		(event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
		(event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
		(event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
		(event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");

	return event;
}
493 
494 static
495 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
496 			unsigned int udelay, const u8 *lanes_data,
497 			u8 link_status[DP_LINK_STATUS_SIZE])
498 {
499 	u8 payload[7];
500 	u8 hdr[5]; /* For DPCD read response header */
501 	u32 addr;
502 	int ret;
503 
504 	if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
505 		dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
506 		ret = -EINVAL;
507 		goto out;
508 	}
509 
510 	payload[0] = nlanes;
511 	put_unaligned_be16(udelay, payload + 1);
512 	memcpy(payload + 3, lanes_data, nlanes);
513 
514 	mutex_lock(&mhdp->mbox_mutex);
515 
516 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
517 				     DPTX_ADJUST_LT,
518 				     sizeof(payload), payload);
519 	if (ret)
520 		goto out;
521 
522 	/* Yes, read the DPCD read command response */
523 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
524 					    DPTX_READ_DPCD,
525 					    sizeof(hdr) + DP_LINK_STATUS_SIZE);
526 	if (ret)
527 		goto out;
528 
529 	ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
530 	if (ret)
531 		goto out;
532 
533 	addr = get_unaligned_be24(hdr + 2);
534 	if (addr != DP_LANE0_1_STATUS)
535 		goto out;
536 
537 	ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
538 					  DP_LINK_STATUS_SIZE);
539 
540 out:
541 	mutex_unlock(&mhdp->mbox_mutex);
542 
543 	if (ret)
544 		dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
545 
546 	return ret;
547 }
548 
549 /**
550  * cdns_mhdp_link_power_up() - power up a DisplayPort link
551  * @aux: DisplayPort AUX channel
552  * @link: pointer to a structure containing the link configuration
553  *
554  * Returns 0 on success or a negative error code on failure.
555  */
556 static
557 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
558 {
559 	u8 value;
560 	int err;
561 
562 	/* DP_SET_POWER register is only available on DPCD v1.1 and later */
563 	if (link->revision < 0x11)
564 		return 0;
565 
566 	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
567 	if (err < 0)
568 		return err;
569 
570 	value &= ~DP_SET_POWER_MASK;
571 	value |= DP_SET_POWER_D0;
572 
573 	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
574 	if (err < 0)
575 		return err;
576 
577 	/*
578 	 * According to the DP 1.1 specification, a "Sink Device must exit the
579 	 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
580 	 * Control Field" (register 0x600).
581 	 */
582 	usleep_range(1000, 2000);
583 
584 	return 0;
585 }
586 
587 /**
588  * cdns_mhdp_link_power_down() - power down a DisplayPort link
589  * @aux: DisplayPort AUX channel
590  * @link: pointer to a structure containing the link configuration
591  *
592  * Returns 0 on success or a negative error code on failure.
593  */
594 static
595 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
596 			      struct cdns_mhdp_link *link)
597 {
598 	u8 value;
599 	int err;
600 
601 	/* DP_SET_POWER register is only available on DPCD v1.1 and later */
602 	if (link->revision < 0x11)
603 		return 0;
604 
605 	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
606 	if (err < 0)
607 		return err;
608 
609 	value &= ~DP_SET_POWER_MASK;
610 	value |= DP_SET_POWER_D3;
611 
612 	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
613 	if (err < 0)
614 		return err;
615 
616 	return 0;
617 }
618 
619 /**
620  * cdns_mhdp_link_configure() - configure a DisplayPort link
621  * @aux: DisplayPort AUX channel
622  * @link: pointer to a structure containing the link configuration
623  *
624  * Returns 0 on success or a negative error code on failure.
625  */
626 static
627 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
628 			     struct cdns_mhdp_link *link)
629 {
630 	u8 values[2];
631 	int err;
632 
633 	values[0] = drm_dp_link_rate_to_bw_code(link->rate);
634 	values[1] = link->num_lanes;
635 
636 	if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
637 		values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
638 
639 	err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
640 	if (err < 0)
641 		return err;
642 
643 	return 0;
644 }
645 
646 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
647 {
648 	return min(mhdp->host.link_rate, mhdp->sink.link_rate);
649 }
650 
651 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
652 {
653 	return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
654 }
655 
656 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
657 {
658 	return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
659 }
660 
661 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
662 {
663 	/* Check if SSC is supported by both sides */
664 	return mhdp->host.ssc && mhdp->sink.ssc;
665 }
666 
667 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
668 {
669 	dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
670 
671 	if (mhdp->plugged)
672 		return connector_status_connected;
673 	else
674 		return connector_status_disconnected;
675 }
676 
677 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
678 {
679 	u32 major_num, minor_num, revision;
680 	u32 fw_ver, lib_ver;
681 
682 	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
683 	       | readl(mhdp->regs + CDNS_VER_L);
684 
685 	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
686 		| readl(mhdp->regs + CDNS_LIB_L_ADDR);
687 
688 	if (lib_ver < 33984) {
689 		/*
690 		 * Older FW versions with major number 1, used to store FW
691 		 * version information by storing repository revision number
692 		 * in registers. This is for identifying these FW versions.
693 		 */
694 		major_num = 1;
695 		minor_num = 2;
696 		if (fw_ver == 26098) {
697 			revision = 15;
698 		} else if (lib_ver == 0 && fw_ver == 0) {
699 			revision = 17;
700 		} else {
701 			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
702 				fw_ver, lib_ver);
703 			return -ENODEV;
704 		}
705 	} else {
706 		/* To identify newer FW versions with major number 2 onwards. */
707 		major_num = fw_ver / 10000;
708 		minor_num = (fw_ver / 100) % 100;
709 		revision = (fw_ver % 10000) % 100;
710 	}
711 
712 	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
713 		revision);
714 	return 0;
715 }
716 
/*
 * Load the firmware image into IMEM and bring the embedded uCPU up.
 *
 * Sequence: stall the uCPU, copy the image, release the stall, wait for
 * the KEEP_ALIVE heartbeat, verify the FW version, clear stale events,
 * send the FW_ACTIVE command, then (under start_lock) mark the HW ready
 * and unmask the SW event interrupt. Returns 0 on success.
 */
static int cdns_mhdp_fw_activate(const struct firmware *fw,
				 struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret;

	/* Release uCPU reset and stall it. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Leave debug mode, release stall */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
	 * Updated each sched "tick" (~2ms)
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		return ret;
	}

	ret = cdns_mhdp_check_fw_version(mhdp);
	if (ret)
		return ret;

	/* Init events to 0 as it's not cleared by FW at boot but on read */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Activate uCPU */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret)
		return ret;

	spin_lock(&mhdp->start_lock);

	mhdp->hw_state = MHDP_HW_READY;

	/*
	 * Here we must keep the lock while enabling the interrupts
	 * since it would otherwise be possible that interrupt enable
	 * code is executed after the bridge is detached. The similar
	 * situation is not possible in attach()/detach() callbacks
	 * since the hw_state changes from MHDP_HW_READY to
	 * MHDP_HW_STOPPED happens only due to driver removal when
	 * bridge should already be detached.
	 */
	cdns_mhdp_bridge_hpd_enable(&mhdp->bridge);

	spin_unlock(&mhdp->start_lock);

	wake_up(&mhdp->fw_load_wq);
	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;
}
781 
/*
 * Completion callback for request_firmware_nowait(): activate the FW
 * and, if the bridge is already attached, emit a hotplug notification.
 */
static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
{
	struct cdns_mhdp_device *mhdp = context;
	bool bridge_attached;
	int ret;

	dev_dbg(mhdp->dev, "firmware callback\n");

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return;
	}

	ret = cdns_mhdp_fw_activate(fw, mhdp);

	release_firmware(fw);

	if (ret)
		return;

	/*
	 *  XXX how to make sure the bridge is still attached when
	 *      calling drm_kms_helper_hotplug_event() after releasing
	 *      the lock? We should not hold the spin lock when
	 *      calling drm_kms_helper_hotplug_event() since it may
	 *      cause a dead lock. FB-dev console calls detect from the
	 *      same thread just down the call stack started here.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);
	if (bridge_attached) {
		/* Full hotplug event if we created the connector ourselves */
		if (mhdp->connector.dev)
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
		else
			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}
820 
821 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
822 {
823 	int ret;
824 
825 	ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
826 				      GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
827 	if (ret) {
828 		dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
829 			FW_NAME, ret);
830 		return ret;
831 	}
832 
833 	return 0;
834 }
835 
836 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
837 				  struct drm_dp_aux_msg *msg)
838 {
839 	struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
840 	int ret;
841 
842 	if (msg->request != DP_AUX_NATIVE_WRITE &&
843 	    msg->request != DP_AUX_NATIVE_READ)
844 		return -EOPNOTSUPP;
845 
846 	if (msg->request == DP_AUX_NATIVE_WRITE) {
847 		const u8 *buf = msg->buffer;
848 		unsigned int i;
849 
850 		for (i = 0; i < msg->size; ++i) {
851 			ret = cdns_mhdp_dpcd_write(mhdp,
852 						   msg->address + i, buf[i]);
853 			if (!ret)
854 				continue;
855 
856 			dev_err(mhdp->dev,
857 				"Failed to write DPCD addr %u\n",
858 				msg->address + i);
859 
860 			return ret;
861 		}
862 	} else {
863 		ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
864 					  msg->buffer, msg->size);
865 		if (ret) {
866 			dev_err(mhdp->dev,
867 				"Failed to read DPCD addr %u\n",
868 				msg->address);
869 
870 			return ret;
871 		}
872 	}
873 
874 	return msg->size;
875 }
876 
/*
 * Prepare the controller, PHY and sink for the start of link training:
 * disable any active training pattern, program lane count / link rate /
 * enhanced framing, configure the PHY, then enable TPS1 with scrambling
 * disabled on both sides.
 *
 * Returns 0 on success or the phy_configure() error code.
 */
static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY configuration */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Enhanced framing only if both host and sink support it */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	/* Mirror the link parameters into the sink's DPCD and the PHY */
	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
	phy_cfg.dp.lanes = mhdp->link.num_lanes;

	/* Start training from zero voltage swing / pre-emphasis */
	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));

	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	ret = phy_configure(mhdp->phy,  &phy_cfg);
	if (ret) {
		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* Begin TPS1 with the scrambler bypassed, per the DP spec */
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);

	return 0;
}
928 
/*
 * Build per-lane TRAINING_LANEx_SET bytes from the sink's adjustment
 * requests in @link_status, clamped to the host's maximum voltage swing
 * and pre-emphasis. The chosen levels are also stored into @phy_cfg so
 * the PHY can be programmed to match.
 */
static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				       u8 link_status[DP_LINK_STATUS_SIZE],
				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				       union phy_configure_opts *phy_cfg)
{
	u8 adjust, max_pre_emph, max_volt_swing;
	u8 set_volt, set_pre;
	unsigned int i;

	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Check if Voltage swing and pre-emphasis are within limits */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min(adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min(adjust, max_pre_emph)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/*
		 * Voltage swing level and pre-emphasis level combination is
		 * not allowed: leaving pre-emphasis as-is, and adjusting
		 * voltage swing.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		/* MAX_*_REACHED flags tell the sink not to ask for more */
		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}
972 
973 static
974 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
975 					  unsigned int lane, u8 volt)
976 {
977 	unsigned int s = ((lane & 1) ?
978 			  DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
979 			  DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
980 	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
981 
982 	link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
983 	link_status[idx] |= volt << s;
984 }
985 
986 static
987 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
988 					       unsigned int lane, u8 pre_emphasis)
989 {
990 	unsigned int s = ((lane & 1) ?
991 			  DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
992 			  DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
993 	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
994 
995 	link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
996 	link_status[idx] |= pre_emphasis << s;
997 }
998 
999 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
1000 					  u8 link_status[DP_LINK_STATUS_SIZE])
1001 {
1002 	u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1003 	u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1004 	unsigned int i;
1005 	u8 volt, pre;
1006 
1007 	for (i = 0; i < mhdp->link.num_lanes; i++) {
1008 		volt = drm_dp_get_adjust_request_voltage(link_status, i);
1009 		pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
1010 		if (volt + pre > 3)
1011 			cdns_mhdp_set_adjust_request_voltage(link_status, i,
1012 							     3 - pre);
1013 		if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
1014 			cdns_mhdp_set_adjust_request_voltage(link_status, i,
1015 							     max_volt);
1016 		if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
1017 			cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
1018 								  i, max_pre);
1019 	}
1020 }
1021 
/*
 * Log a link-training status line: lane count, rate and the per-lane
 * voltage-swing (vs) / pre-emphasis (pe) levels as "a/b/c/d" strings.
 */
static void cdns_mhdp_print_lt_status(const char *prefix,
				      struct cdns_mhdp_device *mhdp,
				      union phy_configure_opts *phy_cfg)
{
	/* Worst-case (4-lane) templates; digits are patched in below */
	char vs[8] = "0/0/0/0";
	char pe[8] = "0/0/0/0";
	unsigned int i;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
		pe[i * 2] = '0' + phy_cfg->dp.pre[i];
	}

	/* Cut the template after the last active lane's digit */
	vs[i * 2 - 1] = '\0';
	pe[i * 2 - 1] = '\0';

	dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
		prefix,
		mhdp->link.num_lanes, mhdp->link.rate / 100,
		vs, pe);
}
1043 
/*
 * Channel-equalization phase of link training.
 *
 * Programs the requested TPS in the PHY and the sink, then iterates:
 * apply the sink's requested drive levels via the PHY and the firmware
 * ADJUST_LT command, and re-check the status. Succeeds when channel EQ
 * is reported OK; gives up if clock recovery is lost, phy_configure()
 * fails, or after 5 short attempts.
 *
 * Returns true when the EQ phase completed successfully.
 */
static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					       u8 eq_tps,
					       unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;
	bool r;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable link training TPS[eq_tps] in PHY */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	/* TPS4 runs with scrambling enabled; TPS1-3 bypass the scrambler */
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		/* Translate the sink's requests into lane data + PHY config */
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy,  &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/* FW pushes the settings to the sink and reads back status */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, link_status);

		/* Losing clock recovery here means the EQ phase failed */
		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
		if (!r)
			goto err;

		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		fail_counter_short++;

		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
	} while (fail_counter_short < 5);

err:
	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);

	return false;
}
1108 
1109 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1110 					  u8 link_status[DP_LINK_STATUS_SIZE],
1111 					  u8 *req_volt, u8 *req_pre)
1112 {
1113 	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1114 	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1115 	unsigned int i;
1116 
1117 	for (i = 0; i < mhdp->link.num_lanes; i++) {
1118 		u8 val;
1119 
1120 		val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1121 		      max_volt : req_volt[i];
1122 		cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1123 
1124 		val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1125 		      max_pre : req_pre[i];
1126 		cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
1127 	}
1128 }
1129 
/*
 * Evaluate the outcome of one clock-recovery (CR) iteration.
 *
 * Outputs:
 *   *cr_done            - all lanes report CR lock (per after_cr status).
 *   *same_before_adjust - at least one lane's requested swing/pre-emphasis
 *                         equals what was already driven (before_cr), i.e.
 *                         the sink asked for no change on that lane.
 *   *max_swing_reached  - CR not done and a lane's combined swing +
 *                         pre-emphasis request hit the limit.
 *   req_volt/req_pre    - per-lane requests, clamped to host maximums.
 */
static
void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			   bool *same_before_adjust, bool *max_swing_reached,
			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			   u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;
	u8 adjust;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Clamp the sink's requests to what the host can drive. */
		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min(adjust, max_volt);

		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
		      DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min(adjust, max_pre);

		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}
1169 
1170 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
1171 {
1172 	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
1173 	fail_counter_short = 0, fail_counter_cr_long = 0;
1174 	u8 link_status[DP_LINK_STATUS_SIZE];
1175 	bool cr_done;
1176 	union phy_configure_opts phy_cfg;
1177 	int ret;
1178 
1179 	dev_dbg(mhdp->dev, "Starting CR phase\n");
1180 
1181 	ret = cdns_mhdp_link_training_init(mhdp);
1182 	if (ret)
1183 		goto err;
1184 
1185 	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1186 
1187 	do {
1188 		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
1189 		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
1190 		bool same_before_adjust, max_swing_reached;
1191 
1192 		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1193 					   &phy_cfg);
1194 		phy_cfg.dp.lanes = mhdp->link.num_lanes;
1195 		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1196 		phy_cfg.dp.set_lanes = false;
1197 		phy_cfg.dp.set_rate = false;
1198 		phy_cfg.dp.set_voltages = true;
1199 		ret = phy_configure(mhdp->phy,  &phy_cfg);
1200 		if (ret) {
1201 			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1202 				__func__, ret);
1203 			goto err;
1204 		}
1205 
1206 		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
1207 				    lanes_data, link_status);
1208 
1209 		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
1210 				      &max_swing_reached, lanes_data,
1211 				      link_status,
1212 				      requested_adjust_volt_swing,
1213 				      requested_adjust_pre_emphasis);
1214 
1215 		if (max_swing_reached) {
1216 			dev_err(mhdp->dev, "CR: max swing reached\n");
1217 			goto err;
1218 		}
1219 
1220 		if (cr_done) {
1221 			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
1222 						  &phy_cfg);
1223 			return true;
1224 		}
1225 
1226 		/* Not all CR_DONE bits set */
1227 		fail_counter_cr_long++;
1228 
1229 		if (same_before_adjust) {
1230 			fail_counter_short++;
1231 			continue;
1232 		}
1233 
1234 		fail_counter_short = 0;
1235 		/*
1236 		 * Voltage swing/pre-emphasis adjust requested
1237 		 * during CR phase
1238 		 */
1239 		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
1240 					      requested_adjust_volt_swing,
1241 					      requested_adjust_pre_emphasis);
1242 	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);
1243 
1244 err:
1245 	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
1246 
1247 	return false;
1248 }
1249 
1250 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1251 {
1252 	switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1253 	case DP_LINK_BW_2_7:
1254 		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1255 		break;
1256 	case DP_LINK_BW_5_4:
1257 		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1258 		break;
1259 	case DP_LINK_BW_8_1:
1260 		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1261 		break;
1262 	}
1263 }
1264 
/*
 * Full link training: iterate CR + EQ phases, falling back after each
 * failure (lower rate or fewer lanes) until the minimal configuration
 * (one lane at RBR) has been tried.
 *
 * training_interval: EQ wait time in microseconds, decoded from DPCD.
 * Returns 0 on success, negative errno / -EIO on failure.
 */
static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
				   unsigned int training_interval)
{
	u32 reg32;
	const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
	int ret;

	while (1) {
		if (!cdns_mhdp_link_training_cr(mhdp)) {
			/* CR failed: reduce rate first, then lane count. */
			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			    DP_LINK_BW_1_62) {
				dev_dbg(mhdp->dev,
					"Reducing link rate during CR phase\n");
				cdns_mhdp_lower_link_rate(&mhdp->link);

				continue;
			} else if (mhdp->link.num_lanes > 1) {
				dev_dbg(mhdp->dev,
					"Reducing lanes number during CR phase\n");
				mhdp->link.num_lanes >>= 1;
				mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);

				continue;
			}

			dev_err(mhdp->dev,
				"Link training failed during CR phase\n");
			goto err;
		}

		if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
						       training_interval))
			break;

		/* EQ failed: reduce lane count first, then the rate. */
		if (mhdp->link.num_lanes > 1) {
			dev_dbg(mhdp->dev,
				"Reducing lanes number during EQ phase\n");
			mhdp->link.num_lanes >>= 1;

			continue;
		} else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			   DP_LINK_BW_1_62) {
			dev_dbg(mhdp->dev,
				"Reducing link rate during EQ phase\n");
			cdns_mhdp_lower_link_rate(&mhdp->link);
			mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

			continue;
		}

		dev_err(mhdp->dev, "Link training failed during EQ phase\n");
		goto err;
	}

	dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
		mhdp->link.num_lanes, mhdp->link.rate / 100);

	/* Training done: stop sending training patterns to the sink. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   mhdp->host.scrambler ? 0 :
			   DP_LINK_SCRAMBLING_DISABLE);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return ret;
	}
	/* Program the negotiated lane count and enable the framer. */
	reg32 &= ~GENMASK(1, 0);
	reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
	reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
	reg32 |= CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);

	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	return 0;
err:
	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return -EIO;
}
1358 
1359 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1360 					      u32 interval)
1361 {
1362 	if (interval == 0)
1363 		return 400;
1364 	if (interval < 5)
1365 		return 4000 << (interval - 1);
1366 	dev_err(mhdp->dev,
1367 		"wrong training interval returned by DPCD: %d\n", interval);
1368 	return 0;
1369 }
1370 
1371 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1372 {
1373 	unsigned int link_rate;
1374 
1375 	/* Get source capabilities based on PHY attributes */
1376 
1377 	mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1378 	if (!mhdp->host.lanes_cnt)
1379 		mhdp->host.lanes_cnt = 4;
1380 
1381 	link_rate = mhdp->phy->attrs.max_link_rate;
1382 	if (!link_rate)
1383 		link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1384 	else
1385 		/* PHY uses Mb/s, DRM uses tens of kb/s. */
1386 		link_rate *= 100;
1387 
1388 	mhdp->host.link_rate = link_rate;
1389 	mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1390 	mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1391 	mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1392 				  CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1393 				  CDNS_SUPPORT_TPS(4);
1394 	mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1395 	mhdp->host.fast_link = false;
1396 	mhdp->host.enhanced = true;
1397 	mhdp->host.scrambler = true;
1398 	mhdp->host.ssc = false;
1399 }
1400 
1401 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1402 				     u8 dpcd[DP_RECEIVER_CAP_SIZE])
1403 {
1404 	mhdp->sink.link_rate = mhdp->link.rate;
1405 	mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1406 	mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1407 				 DP_LINK_CAP_ENHANCED_FRAMING);
1408 
1409 	/* Set SSC support */
1410 	mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1411 				  DP_MAX_DOWNSPREAD_0_5);
1412 
1413 	/* Set TPS support */
1414 	mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1415 	if (drm_dp_tps3_supported(dpcd))
1416 		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1417 	if (drm_dp_tps4_supported(dpcd))
1418 		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1419 
1420 	/* Set fast link support */
1421 	mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1422 				  DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
1423 }
1424 
1425 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1426 {
1427 	u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1428 	u32 resp, interval, interval_us;
1429 	u8 ext_cap_chk = 0;
1430 	unsigned int addr;
1431 	int err;
1432 
1433 	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1434 
1435 	drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1436 			  &ext_cap_chk);
1437 
1438 	if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1439 		addr = DP_DP13_DPCD_REV;
1440 	else
1441 		addr = DP_DPCD_REV;
1442 
1443 	err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1444 	if (err < 0) {
1445 		dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1446 		return err;
1447 	}
1448 
1449 	mhdp->link.revision = dpcd[0];
1450 	mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1451 	mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1452 
1453 	if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1454 		mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1455 
1456 	dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1457 	cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1458 
1459 	cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1460 
1461 	mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1462 	mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1463 
1464 	/* Disable framer for link training */
1465 	err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1466 	if (err < 0) {
1467 		dev_err(mhdp->dev,
1468 			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1469 			err);
1470 		return err;
1471 	}
1472 
1473 	resp &= ~CDNS_DP_FRAMER_EN;
1474 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1475 
1476 	/* Spread AMP if required, enable 8b/10b coding */
1477 	amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1478 	amp[1] = DP_SET_ANSI_8B10B;
1479 	drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1480 
1481 	if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1482 		dev_err(mhdp->dev, "fastlink not supported\n");
1483 		return -EOPNOTSUPP;
1484 	}
1485 
1486 	interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1487 	interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1488 	if (!interval_us ||
1489 	    cdns_mhdp_link_training(mhdp, interval_us)) {
1490 		dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1491 		return -EIO;
1492 	}
1493 
1494 	mhdp->link_up = true;
1495 
1496 	return 0;
1497 }
1498 
/*
 * Tear the DP link down. Must be called with mhdp->link_mutex held.
 * The sink is only powered down over AUX while it is still plugged in.
 */
static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
{
	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	if (mhdp->plugged)
		cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);

	mhdp->link_up = false;
}
1508 
1509 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1510 				       struct drm_connector *connector)
1511 {
1512 	if (!mhdp->plugged)
1513 		return NULL;
1514 
1515 	return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
1516 }
1517 
1518 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1519 {
1520 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1521 	struct edid *edid;
1522 	int num_modes;
1523 
1524 	if (!mhdp->plugged)
1525 		return 0;
1526 
1527 	edid = cdns_mhdp_get_edid(mhdp, connector);
1528 	if (!edid) {
1529 		dev_err(mhdp->dev, "Failed to read EDID\n");
1530 		return 0;
1531 	}
1532 
1533 	drm_connector_update_edid_property(connector, edid);
1534 	num_modes = drm_add_edid_modes(connector, edid);
1535 	kfree(edid);
1536 
1537 	/*
1538 	 * HACK: Warn about unsupported display formats until we deal
1539 	 *       with them correctly.
1540 	 */
1541 	if (connector->display_info.color_formats &&
1542 	    !(connector->display_info.color_formats &
1543 	      mhdp->display_fmt.color_format))
1544 		dev_warn(mhdp->dev,
1545 			 "%s: No supported color_format found (0x%08x)\n",
1546 			__func__, connector->display_info.color_formats);
1547 
1548 	if (connector->display_info.bpc &&
1549 	    connector->display_info.bpc < mhdp->display_fmt.bpc)
1550 		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1551 			 __func__, connector->display_info.bpc,
1552 			 mhdp->display_fmt.bpc);
1553 
1554 	return num_modes;
1555 }
1556 
1557 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1558 				      struct drm_modeset_acquire_ctx *ctx,
1559 				      bool force)
1560 {
1561 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1562 
1563 	return cdns_mhdp_detect(mhdp);
1564 }
1565 
1566 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1567 {
1568 	u32 bpp;
1569 
1570 	if (fmt->y_only)
1571 		return fmt->bpc;
1572 
1573 	switch (fmt->color_format) {
1574 	case DRM_COLOR_FORMAT_RGB444:
1575 	case DRM_COLOR_FORMAT_YCBCR444:
1576 		bpp = fmt->bpc * 3;
1577 		break;
1578 	case DRM_COLOR_FORMAT_YCBCR422:
1579 		bpp = fmt->bpc * 2;
1580 		break;
1581 	case DRM_COLOR_FORMAT_YCBCR420:
1582 		bpp = fmt->bpc * 3 / 2;
1583 		break;
1584 	default:
1585 		bpp = fmt->bpc * 3;
1586 		WARN_ON(1);
1587 	}
1588 	return bpp;
1589 }
1590 
1591 static
1592 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1593 			    const struct drm_display_mode *mode,
1594 			    unsigned int lanes, unsigned int rate)
1595 {
1596 	u32 max_bw, req_bw, bpp;
1597 
1598 	/*
1599 	 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1600 	 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1601 	 * value thus equals the bandwidth in 10kb/s units, which matches the
1602 	 * units of the rate parameter.
1603 	 */
1604 
1605 	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1606 	req_bw = mode->clock * bpp / 8;
1607 	max_bw = lanes * rate;
1608 	if (req_bw > max_bw) {
1609 		dev_dbg(mhdp->dev,
1610 			"Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1611 			mode->name, req_bw, max_bw);
1612 
1613 		return false;
1614 	}
1615 
1616 	return true;
1617 }
1618 
1619 static
1620 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1621 					  struct drm_display_mode *mode)
1622 {
1623 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1624 
1625 	mutex_lock(&mhdp->link_mutex);
1626 
1627 	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1628 				    mhdp->link.rate)) {
1629 		mutex_unlock(&mhdp->link_mutex);
1630 		return MODE_CLOCK_HIGH;
1631 	}
1632 
1633 	mutex_unlock(&mhdp->link_mutex);
1634 	return MODE_OK;
1635 }
1636 
/*
 * .atomic_check hook: force a full modeset whenever the HDCP content
 * protection state needs to change, so that (re)authentication runs in
 * the enable path. No-op when HDCP is not supported.
 */
static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	/* A content-type change requires re-authentication. */
	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	/* Connector being disabled: downgrade ENABLED back to DESIRED. */
	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	/* No change, or only DESIRED -> ENABLED: nothing to redo. */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}
1676 
/* Connector probe/validation hooks called by the DRM core. */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};
1683 
/* Connector ops: standard atomic helpers, no driver-specific state. */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};
1691 
/*
 * Create and register the DRM connector for this bridge (used when the
 * bridge is attached without DRM_BRIDGE_ATTACH_NO_CONNECTOR).
 * Returns 0 on success, negative errno on failure.
 */
static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	if (!bridge->encoder) {
		dev_err(mhdp->dev, "Parent encoder object not found");
		return -ENODEV;
	}

	/* Hotplug is interrupt driven; no polling needed. */
	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	/* Expose the "Content Protection" property when HDCP is available. */
	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}
1731 
/*
 * .attach bridge hook: register the AUX channel, optionally create the
 * connector, and enable HPD interrupts once the firmware is ready.
 * Returns 0 on success, negative errno on failure.
 */
static int cdns_mhdp_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mhdp->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&mhdp->aux);
	if (ret < 0)
		return ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			goto aux_unregister;
	}

	/* start_lock guards bridge_attached/hw_state against the FW loader. */
	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts */
	if (hw_ready)
		cdns_mhdp_bridge_hpd_enable(bridge);

	return 0;
aux_unregister:
	drm_dp_aux_unregister(&mhdp->aux);
	return ret;
}
1768 
/*
 * Program the framer and MSA (Main Stream Attribute) registers for the
 * given mode: pixel format/bit depth, sync polarities, porches, and the
 * horizontal/vertical timing, then enable the framer for video.
 */
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
				      const struct drm_display_mode *mode)
{
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	u8 stream_id = mhdp->stream_id;
	u32 bpp, bpc, pxlfmt, framer;
	int ret;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/*
	 * If YCBCR supported and stream not SD, use ITU709
	 * Need to handle ITU version with YCBCR420 when supported
	 */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Map the DRM color format to the framer pixel representation
	 * and the corresponding MISC0 MSA bits.
	 */
	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCBCR444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR420:
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	/* Encode the bit depth into both the framer and MISC0. */
	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	/* Sync polarities for the input video interface. */
	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	/* Sync polarities and scan type for the framer. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	/* Active line length in bytes. */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	/* MSA horizontal timing. */
	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	/* MSA vertical timing. */
	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* Use VSC SDP for Y420 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	/* Finally enable the framer and leave "no video" mode. */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}
1939 
/*
 * Enable single-stream transport for the given mode: compute the
 * transfer-unit (TU) fill level and line thresholds from the pixel
 * clock vs. link bandwidth, program them, then configure the video
 * timing registers.
 */
static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
				 const struct drm_display_mode *mode)
{
	u32 rate, vs, required_bandwidth, available_bandwidth;
	s32 line_thresh1, line_thresh2, line_thresh = 0;
	int pxlclock = mode->crtc_clock;
	u32 tu_size = 64;
	u32 bpp;

	/* Get rate in MSymbols per second per lane */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;

	/* Valid symbols per TU, proportional to bandwidth utilization. */
	vs = tu_size * required_bandwidth / available_bandwidth;
	vs /= 1000;

	if (vs == tu_size)
		vs = tu_size - 1;

	/* Fixed-point (x32) line threshold calculation. */
	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) + 2;

	/* Only SST is supported, so always stream 0. */
	mhdp->stream_id = 0;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
			    CDNS_DP_FRAMER_TU_VS(vs) |
			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
			    CDNS_DP_FRAMER_TU_CNT_RST_EN);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
			    line_thresh & GENMASK(5, 0));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
						   0 : tu_size - vs));

	cdns_mhdp_configure_video(mhdp, mode);
}
1984 
/*
 * .atomic_enable bridge hook: bring the link up if needed, enable the
 * VIF clock, optionally start HDCP, validate bandwidth and start the
 * video stream. On failure, schedules the modeset retry worker.
 */
static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
				    struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	struct drm_atomic_state *state = bridge_state->base.state;
	struct cdns_mhdp_bridge_state *mhdp_state;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_bridge_state *new_state;
	const struct drm_display_mode *mode;
	u32 resp;
	int ret;

	dev_dbg(mhdp->dev, "bridge enable\n");

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->plugged && !mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
		mhdp->info->ops->enable(mhdp);

	/* Enable VIF clock for stream 0 */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	if (ret < 0) {
		dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
		goto out;
	}

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);

	/* ret is >= 0 here, so the WARN_ON exits below skip the retry work. */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (WARN_ON(!connector))
		goto out;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		goto out;

	if (mhdp->hdcp_supported &&
	    mhdp->hw_state == MHDP_HW_READY &&
	    conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		/* Drop the lock: HDCP enable takes it internally. */
		mutex_unlock(&mhdp->link_mutex);
		cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
		mutex_lock(&mhdp->link_mutex);
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	if (WARN_ON(!crtc_state))
		goto out;

	mode = &crtc_state->adjusted_mode;

	new_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_state))
		goto out;

	/* Re-check: training may have settled on fewer lanes / lower rate. */
	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		ret = -EINVAL;
		goto out;
	}

	cdns_mhdp_sst_enable(mhdp, mode);

	mhdp_state = to_cdns_mhdp_bridge_state(new_state);

	/* Keep a copy of the mode for the HDCP/retry paths. */
	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
	drm_mode_set_name(mhdp_state->current_mode);

	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);

	mhdp->bridge_enabled = true;

out:
	mutex_unlock(&mhdp->link_mutex);
	if (ret < 0)
		schedule_work(&mhdp->modeset_retry_work);
}
2072 
/*
 * .atomic_disable bridge hook: stop HDCP, disable the framer and take
 * the link down, then gate the VIF clock for stream 0.
 */
static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
				     struct drm_bridge_state *bridge_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 resp;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_disable(mhdp);

	/* Stop the video stream before touching the link. */
	mhdp->bridge_enabled = false;
	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	resp &= ~CDNS_DP_FRAMER_EN;
	resp |= CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	cdns_mhdp_link_down(mhdp);

	/* Disable VIF clock for stream 0 */
	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
		mhdp->info->ops->disable(mhdp);

	mutex_unlock(&mhdp->link_mutex);
}
2104 
/*
 * Bridge ->detach: undo cdns_mhdp_attach.
 *
 * Unregisters the DP AUX channel, clears bridge_attached under start_lock
 * (the IRQ handler reads this flag to decide whether HPD work may be
 * scheduled), then masks all APB interrupts.
 */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	drm_dp_aux_unregister(&mhdp->aux);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	/* Mask every APB interrupt source */
	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}
2121 
2122 static struct drm_bridge_state *
2123 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2124 {
2125 	struct cdns_mhdp_bridge_state *state;
2126 
2127 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2128 	if (!state)
2129 		return NULL;
2130 
2131 	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2132 
2133 	return &state->base;
2134 }
2135 
2136 static void
2137 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2138 				      struct drm_bridge_state *state)
2139 {
2140 	struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2141 
2142 	cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2143 
2144 	if (cdns_mhdp_state->current_mode) {
2145 		drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2146 		cdns_mhdp_state->current_mode = NULL;
2147 	}
2148 
2149 	kfree(cdns_mhdp_state);
2150 }
2151 
2152 static struct drm_bridge_state *
2153 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2154 {
2155 	struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2156 
2157 	cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2158 	if (!cdns_mhdp_state)
2159 		return NULL;
2160 
2161 	__drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2162 
2163 	return &cdns_mhdp_state->base;
2164 }
2165 
2166 static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge,
2167 					 struct drm_bridge_state *bridge_state,
2168 					 struct drm_crtc_state *crtc_state,
2169 					 struct drm_connector_state *conn_state,
2170 					 u32 output_fmt,
2171 					 unsigned int *num_input_fmts)
2172 {
2173 	u32 *input_fmts;
2174 
2175 	*num_input_fmts = 0;
2176 
2177 	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
2178 	if (!input_fmts)
2179 		return NULL;
2180 
2181 	*num_input_fmts = 1;
2182 	input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;
2183 
2184 	return input_fmts;
2185 }
2186 
2187 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2188 				  struct drm_bridge_state *bridge_state,
2189 				  struct drm_crtc_state *crtc_state,
2190 				  struct drm_connector_state *conn_state)
2191 {
2192 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2193 	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2194 
2195 	mutex_lock(&mhdp->link_mutex);
2196 
2197 	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2198 				    mhdp->link.rate)) {
2199 		dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2200 			__func__, mode->name, mhdp->link.num_lanes,
2201 			mhdp->link.rate / 100);
2202 		mutex_unlock(&mhdp->link_mutex);
2203 		return -EINVAL;
2204 	}
2205 
2206 	/*
2207 	 * There might be flags negotiation supported in future.
2208 	 * Set the bus flags in atomic_check statically for now.
2209 	 */
2210 	if (mhdp->info)
2211 		bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;
2212 
2213 	mutex_unlock(&mhdp->link_mutex);
2214 	return 0;
2215 }
2216 
2217 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2218 {
2219 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2220 
2221 	return cdns_mhdp_detect(mhdp);
2222 }
2223 
/* Bridge ->get_edid: thin adapter forwarding to the device-level helper. */
static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
					      struct drm_connector *connector)
{
	return cdns_mhdp_get_edid(bridge_to_mhdp(bridge), connector);
}
2231 
/* drm_bridge operations implemented by this driver. */
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
	.detect = cdns_mhdp_bridge_detect,
	.get_edid = cdns_mhdp_bridge_get_edid,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
2247 
2248 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2249 {
2250 	int hpd_event, hpd_status;
2251 
2252 	*hpd_pulse = false;
2253 
2254 	hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2255 
2256 	/* Getting event bits failed, bail out */
2257 	if (hpd_event < 0) {
2258 		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2259 			 __func__, hpd_event);
2260 		return false;
2261 	}
2262 
2263 	hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2264 	if (hpd_status < 0) {
2265 		dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2266 			 __func__, hpd_status);
2267 		return false;
2268 	}
2269 
2270 	if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2271 		*hpd_pulse = true;
2272 
2273 	return !!hpd_status;
2274 }
2275 
/*
 * React to an HPD change: refresh plugged state, retrain the link if
 * needed and, if the bridge is enabled, re-validate and re-enable the
 * current mode.
 *
 * Runs under link_mutex. Returns 0 on success or a negative errno (caller
 * schedules the modeset-retry work on failure).
 */
static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	/* Unplugged: drop the link and reset rate/lanes to host capabilities */
	if (!mhdp->plugged) {
		cdns_mhdp_link_down(mhdp);
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * If we get a HPD pulse event and we were and still are connected,
	 * check the link status. If link status is ok, there's nothing to do
	 * as we don't handle DP interrupts. If link status is bad, continue
	 * with full link setup.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/*
		 * If everything looks fine, just return, as we don't handle
		 * DP IRQs.
		 */
		if (ret > 0 &&
		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
			goto out;

		/* If link is bad, mark link as down so that we do a new LT */
		mhdp->link_up = false;
	}

	/* (Re)train the link if it is currently down */
	if (!mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	/* If a mode is active, re-validate it against the new link and restart it */
	if (mhdp->bridge_enabled) {
		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
		if (!state) {
			ret = -EINVAL;
			goto out;
		}

		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
		if (!cdns_bridge_state) {
			ret = -EINVAL;
			goto out;
		}

		current_mode = cdns_bridge_state->current_mode;
		if (!current_mode) {
			ret = -EINVAL;
			goto out;
		}

		/* Retrained link may be slower than the mode needs */
		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
					    mhdp->link.rate)) {
			ret = -EINVAL;
			goto out;
		}

		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
			current_mode->name);

		cdns_mhdp_sst_enable(mhdp, current_mode);
	}
out:
	mutex_unlock(&mhdp->link_mutex);
	return ret;
}
2359 
2360 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2361 {
2362 	struct cdns_mhdp_device *mhdp;
2363 	struct drm_connector *conn;
2364 
2365 	mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2366 
2367 	conn = &mhdp->connector;
2368 
2369 	/* Grab the locks before changing connector property */
2370 	mutex_lock(&conn->dev->mode_config.mutex);
2371 
2372 	/*
2373 	 * Set connector link status to BAD and send a Uevent to notify
2374 	 * userspace to do a modeset.
2375 	 */
2376 	drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2377 	mutex_unlock(&conn->dev->mode_config.mutex);
2378 
2379 	/* Send Hotplug uevent so userspace can reprobe */
2380 	drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2381 }
2382 
2383 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2384 {
2385 	struct cdns_mhdp_device *mhdp = data;
2386 	u32 apb_stat, sw_ev0;
2387 	bool bridge_attached;
2388 
2389 	apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2390 	if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2391 		return IRQ_NONE;
2392 
2393 	sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2394 
2395 	/*
2396 	 *  Calling drm_kms_helper_hotplug_event() when not attached
2397 	 *  to drm device causes an oops because the drm_bridge->dev
2398 	 *  is NULL. See cdns_mhdp_fw_cb() comments for details about the
2399 	 *  problems related drm_kms_helper_hotplug_event() call.
2400 	 */
2401 	spin_lock(&mhdp->start_lock);
2402 	bridge_attached = mhdp->bridge_attached;
2403 	spin_unlock(&mhdp->start_lock);
2404 
2405 	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2406 		schedule_work(&mhdp->hpd_work);
2407 	}
2408 
2409 	if (sw_ev0 & ~CDNS_DPTX_HPD) {
2410 		mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2411 		wake_up(&mhdp->sw_events_wq);
2412 	}
2413 
2414 	return IRQ_HANDLED;
2415 }
2416 
2417 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2418 {
2419 	u32 ret;
2420 
2421 	ret = wait_event_timeout(mhdp->sw_events_wq,
2422 				 mhdp->sw_events & event,
2423 				 msecs_to_jiffies(500));
2424 	if (!ret) {
2425 		dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2426 		goto sw_event_out;
2427 	}
2428 
2429 	ret = mhdp->sw_events;
2430 	mhdp->sw_events &= ~event;
2431 
2432 sw_event_out:
2433 	return ret;
2434 }
2435 
2436 static void cdns_mhdp_hpd_work(struct work_struct *work)
2437 {
2438 	struct cdns_mhdp_device *mhdp = container_of(work,
2439 						     struct cdns_mhdp_device,
2440 						     hpd_work);
2441 	int ret;
2442 
2443 	ret = cdns_mhdp_update_link_status(mhdp);
2444 	if (mhdp->connector.dev) {
2445 		if (ret < 0)
2446 			schedule_work(&mhdp->modeset_retry_work);
2447 		else
2448 			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2449 	} else {
2450 		drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
2451 	}
2452 }
2453 
2454 static int cdns_mhdp_probe(struct platform_device *pdev)
2455 {
2456 	struct device *dev = &pdev->dev;
2457 	struct cdns_mhdp_device *mhdp;
2458 	unsigned long rate;
2459 	struct clk *clk;
2460 	int ret;
2461 	int irq;
2462 
2463 	mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2464 	if (!mhdp)
2465 		return -ENOMEM;
2466 
2467 	clk = devm_clk_get(dev, NULL);
2468 	if (IS_ERR(clk)) {
2469 		dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2470 		return PTR_ERR(clk);
2471 	}
2472 
2473 	mhdp->clk = clk;
2474 	mhdp->dev = dev;
2475 	mutex_init(&mhdp->mbox_mutex);
2476 	mutex_init(&mhdp->link_mutex);
2477 	spin_lock_init(&mhdp->start_lock);
2478 
2479 	drm_dp_aux_init(&mhdp->aux);
2480 	mhdp->aux.dev = dev;
2481 	mhdp->aux.transfer = cdns_mhdp_transfer;
2482 
2483 	mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2484 	if (IS_ERR(mhdp->regs)) {
2485 		dev_err(dev, "Failed to get memory resource\n");
2486 		return PTR_ERR(mhdp->regs);
2487 	}
2488 
2489 	mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2490 	if (IS_ERR(mhdp->sapb_regs)) {
2491 		mhdp->hdcp_supported = false;
2492 		dev_warn(dev,
2493 			 "Failed to get SAPB memory resource, HDCP not supported\n");
2494 	} else {
2495 		mhdp->hdcp_supported = true;
2496 	}
2497 
2498 	mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2499 	if (IS_ERR(mhdp->phy)) {
2500 		dev_err(dev, "no PHY configured\n");
2501 		return PTR_ERR(mhdp->phy);
2502 	}
2503 
2504 	platform_set_drvdata(pdev, mhdp);
2505 
2506 	mhdp->info = of_device_get_match_data(dev);
2507 
2508 	clk_prepare_enable(clk);
2509 
2510 	pm_runtime_enable(dev);
2511 	ret = pm_runtime_resume_and_get(dev);
2512 	if (ret < 0) {
2513 		dev_err(dev, "pm_runtime_resume_and_get failed\n");
2514 		pm_runtime_disable(dev);
2515 		goto clk_disable;
2516 	}
2517 
2518 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2519 		ret = mhdp->info->ops->init(mhdp);
2520 		if (ret != 0) {
2521 			dev_err(dev, "MHDP platform initialization failed: %d\n",
2522 				ret);
2523 			goto runtime_put;
2524 		}
2525 	}
2526 
2527 	rate = clk_get_rate(clk);
2528 	writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2529 	writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2530 
2531 	dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2532 
2533 	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2534 
2535 	irq = platform_get_irq(pdev, 0);
2536 	ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2537 					cdns_mhdp_irq_handler, IRQF_ONESHOT,
2538 					"mhdp8546", mhdp);
2539 	if (ret) {
2540 		dev_err(dev, "cannot install IRQ %d\n", irq);
2541 		ret = -EIO;
2542 		goto plat_fini;
2543 	}
2544 
2545 	cdns_mhdp_fill_host_caps(mhdp);
2546 
2547 	/* Initialize link rate and num of lanes to host values */
2548 	mhdp->link.rate = mhdp->host.link_rate;
2549 	mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2550 
2551 	/* The only currently supported format */
2552 	mhdp->display_fmt.y_only = false;
2553 	mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2554 	mhdp->display_fmt.bpc = 8;
2555 
2556 	mhdp->bridge.of_node = pdev->dev.of_node;
2557 	mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2558 	mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2559 			   DRM_BRIDGE_OP_HPD;
2560 	mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2561 
2562 	ret = phy_init(mhdp->phy);
2563 	if (ret) {
2564 		dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2565 		goto plat_fini;
2566 	}
2567 
2568 	/* Initialize the work for modeset in case of link train failure */
2569 	INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2570 	INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2571 
2572 	init_waitqueue_head(&mhdp->fw_load_wq);
2573 	init_waitqueue_head(&mhdp->sw_events_wq);
2574 
2575 	ret = cdns_mhdp_load_firmware(mhdp);
2576 	if (ret)
2577 		goto phy_exit;
2578 
2579 	if (mhdp->hdcp_supported)
2580 		cdns_mhdp_hdcp_init(mhdp);
2581 
2582 	drm_bridge_add(&mhdp->bridge);
2583 
2584 	return 0;
2585 
2586 phy_exit:
2587 	phy_exit(mhdp->phy);
2588 plat_fini:
2589 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2590 		mhdp->info->ops->exit(mhdp);
2591 runtime_put:
2592 	pm_runtime_put_sync(dev);
2593 	pm_runtime_disable(dev);
2594 clk_disable:
2595 	clk_disable_unprepare(mhdp->clk);
2596 
2597 	return ret;
2598 }
2599 
/*
 * Platform remove: unregister the bridge and tear down firmware, PHY,
 * platform hooks, runtime PM and the clock — mirroring probe in reverse.
 *
 * Waits up to 100 ms for a firmware load still in flight; the firmware is
 * only told to stop if it actually reached MHDP_HW_READY.
 */
static int cdns_mhdp_remove(struct platform_device *pdev)
{
	struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
	unsigned long timeout = msecs_to_jiffies(100);
	bool stop_fw = false;
	int ret;

	drm_bridge_remove(&mhdp->bridge);

	ret = wait_event_timeout(mhdp->fw_load_wq,
				 mhdp->hw_state == MHDP_HW_READY,
				 timeout);
	if (ret == 0)
		dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
			__func__);
	else
		stop_fw = true;

	/* Flip HW state under start_lock so the fw-load path sees STOPPED */
	spin_lock(&mhdp->start_lock);
	mhdp->hw_state = MHDP_HW_STOPPED;
	spin_unlock(&mhdp->start_lock);

	if (stop_fw)
		ret = cdns_mhdp_set_firmware_active(mhdp, false);

	phy_exit(mhdp->phy);

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
		mhdp->info->ops->exit(mhdp);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	cancel_work_sync(&mhdp->modeset_retry_work);
	flush_work(&mhdp->hpd_work);
	/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */

	clk_disable_unprepare(mhdp->clk);

	return ret;
}
2641 
/* Device-tree match table; the TI J721E entry carries platform-specific ops. */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
	{ .compatible = "ti,j721e-mhdp8546",
	  .data = &(const struct cdns_mhdp_platform_info) {
		  .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags,
		  .ops = &mhdp_ti_j721e_ops,
	  },
	},
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);
2655 
/* Platform driver registration and module metadata. */
static struct platform_driver mhdp_driver = {
	.driver	= {
		.name		= "cdns-mhdp8546",
		.of_match_table	= of_match_ptr(mhdp_ids),
	},
	.probe	= cdns_mhdp_probe,
	.remove	= cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);

MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");
2676