1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence MHDP8546 DP bridge driver.
4  *
5  * Copyright (C) 2020 Cadence Design Systems, Inc.
6  *
7  * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
8  *          Swapnil Jakhade <sjakhade@cadence.com>
9  *          Yuti Amonkar <yamonkar@cadence.com>
10  *          Tomi Valkeinen <tomi.valkeinen@ti.com>
11  *          Jyri Sarha <jsarha@ti.com>
12  *
13  * TODO:
14  *     - Implement optimized mailbox communication using mailbox interrupts
15  *     - Add support for power management
16  *     - Add support for features like audio, MST and fast link training
17  *     - Implement request_fw_cancel to handle HW_STATE
18  *     - Fix asynchronous loading of firmware implementation
19  *     - Add DRM helper function for cdns_mhdp_lower_link_rate
20  */
21 
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/media-bus-format.h>
30 #include <linux/module.h>
31 #include <linux/of.h>
32 #include <linux/of_device.h>
33 #include <linux/phy/phy.h>
34 #include <linux/phy/phy-dp.h>
35 #include <linux/platform_device.h>
36 #include <linux/slab.h>
37 #include <linux/wait.h>
38 
39 #include <drm/display/drm_dp_helper.h>
40 #include <drm/display/drm_hdcp_helper.h>
41 #include <drm/drm_atomic.h>
42 #include <drm/drm_atomic_helper.h>
43 #include <drm/drm_atomic_state_helper.h>
44 #include <drm/drm_bridge.h>
45 #include <drm/drm_connector.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_edid.h>
48 #include <drm/drm_modeset_helper_vtables.h>
49 #include <drm/drm_print.h>
50 #include <drm/drm_probe_helper.h>
51 
52 #include <asm/unaligned.h>
53 
54 #include "cdns-mhdp8546-core.h"
55 #include "cdns-mhdp8546-hdcp.h"
56 #include "cdns-mhdp8546-j721e.h"
57 
58 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
59 {
60 	int ret, empty;
61 
62 	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
63 
64 	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
65 				 empty, !empty, MAILBOX_RETRY_US,
66 				 MAILBOX_TIMEOUT_US);
67 	if (ret < 0)
68 		return ret;
69 
70 	return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
71 }
72 
73 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
74 {
75 	int ret, full;
76 
77 	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
78 
79 	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
80 				 full, !full, MAILBOX_RETRY_US,
81 				 MAILBOX_TIMEOUT_US);
82 	if (ret < 0)
83 		return ret;
84 
85 	writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
86 
87 	return 0;
88 }
89 
90 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
91 					 u8 module_id, u8 opcode,
92 					 u16 req_size)
93 {
94 	u32 mbox_size, i;
95 	u8 header[4];
96 	int ret;
97 
98 	/* read the header of the message */
99 	for (i = 0; i < sizeof(header); i++) {
100 		ret = cdns_mhdp_mailbox_read(mhdp);
101 		if (ret < 0)
102 			return ret;
103 
104 		header[i] = ret;
105 	}
106 
107 	mbox_size = get_unaligned_be16(header + 2);
108 
109 	if (opcode != header[0] || module_id != header[1] ||
110 	    req_size != mbox_size) {
111 		/*
112 		 * If the message in mailbox is not what we want, we need to
113 		 * clear the mailbox by reading its contents.
114 		 */
115 		for (i = 0; i < mbox_size; i++)
116 			if (cdns_mhdp_mailbox_read(mhdp) < 0)
117 				break;
118 
119 		return -EINVAL;
120 	}
121 
122 	return 0;
123 }
124 
125 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
126 				       u8 *buff, u16 buff_size)
127 {
128 	u32 i;
129 	int ret;
130 
131 	for (i = 0; i < buff_size; i++) {
132 		ret = cdns_mhdp_mailbox_read(mhdp);
133 		if (ret < 0)
134 			return ret;
135 
136 		buff[i] = ret;
137 	}
138 
139 	return 0;
140 }
141 
142 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
143 				  u8 opcode, u16 size, u8 *message)
144 {
145 	u8 header[4];
146 	int ret, i;
147 
148 	header[0] = opcode;
149 	header[1] = module_id;
150 	put_unaligned_be16(size, header + 2);
151 
152 	for (i = 0; i < sizeof(header); i++) {
153 		ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
154 		if (ret)
155 			return ret;
156 	}
157 
158 	for (i = 0; i < size; i++) {
159 		ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
160 		if (ret)
161 			return ret;
162 	}
163 
164 	return 0;
165 }
166 
167 static
168 int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
169 {
170 	u8 msg[4], resp[8];
171 	int ret;
172 
173 	put_unaligned_be32(addr, msg);
174 
175 	mutex_lock(&mhdp->mbox_mutex);
176 
177 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
178 				     GENERAL_REGISTER_READ,
179 				     sizeof(msg), msg);
180 	if (ret)
181 		goto out;
182 
183 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
184 					    GENERAL_REGISTER_READ,
185 					    sizeof(resp));
186 	if (ret)
187 		goto out;
188 
189 	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
190 	if (ret)
191 		goto out;
192 
193 	/* Returned address value should be the same as requested */
194 	if (memcmp(msg, resp, sizeof(msg))) {
195 		ret = -EINVAL;
196 		goto out;
197 	}
198 
199 	*value = get_unaligned_be32(resp + 4);
200 
201 out:
202 	mutex_unlock(&mhdp->mbox_mutex);
203 	if (ret) {
204 		dev_err(mhdp->dev, "Failed to read register\n");
205 		*value = 0;
206 	}
207 
208 	return ret;
209 }
210 
211 static
212 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
213 {
214 	u8 msg[6];
215 	int ret;
216 
217 	put_unaligned_be16(addr, msg);
218 	put_unaligned_be32(val, msg + 2);
219 
220 	mutex_lock(&mhdp->mbox_mutex);
221 
222 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
223 				     DPTX_WRITE_REGISTER, sizeof(msg), msg);
224 
225 	mutex_unlock(&mhdp->mbox_mutex);
226 
227 	return ret;
228 }
229 
230 static
231 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
232 			    u8 start_bit, u8 bits_no, u32 val)
233 {
234 	u8 field[8];
235 	int ret;
236 
237 	put_unaligned_be16(addr, field);
238 	field[2] = start_bit;
239 	field[3] = bits_no;
240 	put_unaligned_be32(val, field + 4);
241 
242 	mutex_lock(&mhdp->mbox_mutex);
243 
244 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
245 				     DPTX_WRITE_FIELD, sizeof(field), field);
246 
247 	mutex_unlock(&mhdp->mbox_mutex);
248 
249 	return ret;
250 }
251 
252 static
253 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
254 			u32 addr, u8 *data, u16 len)
255 {
256 	u8 msg[5], reg[5];
257 	int ret;
258 
259 	put_unaligned_be16(len, msg);
260 	put_unaligned_be24(addr, msg + 2);
261 
262 	mutex_lock(&mhdp->mbox_mutex);
263 
264 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
265 				     DPTX_READ_DPCD, sizeof(msg), msg);
266 	if (ret)
267 		goto out;
268 
269 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
270 					    DPTX_READ_DPCD,
271 					    sizeof(reg) + len);
272 	if (ret)
273 		goto out;
274 
275 	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
276 	if (ret)
277 		goto out;
278 
279 	ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
280 
281 out:
282 	mutex_unlock(&mhdp->mbox_mutex);
283 
284 	return ret;
285 }
286 
287 static
288 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
289 {
290 	u8 msg[6], reg[5];
291 	int ret;
292 
293 	put_unaligned_be16(1, msg);
294 	put_unaligned_be24(addr, msg + 2);
295 	msg[5] = value;
296 
297 	mutex_lock(&mhdp->mbox_mutex);
298 
299 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
300 				     DPTX_WRITE_DPCD, sizeof(msg), msg);
301 	if (ret)
302 		goto out;
303 
304 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
305 					    DPTX_WRITE_DPCD, sizeof(reg));
306 	if (ret)
307 		goto out;
308 
309 	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
310 	if (ret)
311 		goto out;
312 
313 	if (addr != get_unaligned_be24(reg + 2))
314 		ret = -EINVAL;
315 
316 out:
317 	mutex_unlock(&mhdp->mbox_mutex);
318 
319 	if (ret)
320 		dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
321 	return ret;
322 }
323 
324 static
325 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
326 {
327 	u8 msg[5];
328 	int ret, i;
329 
330 	msg[0] = GENERAL_MAIN_CONTROL;
331 	msg[1] = MB_MODULE_ID_GENERAL;
332 	msg[2] = 0;
333 	msg[3] = 1;
334 	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
335 
336 	mutex_lock(&mhdp->mbox_mutex);
337 
338 	for (i = 0; i < sizeof(msg); i++) {
339 		ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
340 		if (ret)
341 			goto out;
342 	}
343 
344 	/* read the firmware state */
345 	ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
346 	if (ret)
347 		goto out;
348 
349 	ret = 0;
350 
351 out:
352 	mutex_unlock(&mhdp->mbox_mutex);
353 
354 	if (ret < 0)
355 		dev_err(mhdp->dev, "set firmware active failed\n");
356 	return ret;
357 }
358 
359 static
360 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
361 {
362 	u8 status;
363 	int ret;
364 
365 	mutex_lock(&mhdp->mbox_mutex);
366 
367 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
368 				     DPTX_HPD_STATE, 0, NULL);
369 	if (ret)
370 		goto err_get_hpd;
371 
372 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
373 					    DPTX_HPD_STATE,
374 					    sizeof(status));
375 	if (ret)
376 		goto err_get_hpd;
377 
378 	ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
379 	if (ret)
380 		goto err_get_hpd;
381 
382 	mutex_unlock(&mhdp->mbox_mutex);
383 
384 	dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
385 		status ? "" : "un");
386 
387 	return status;
388 
389 err_get_hpd:
390 	mutex_unlock(&mhdp->mbox_mutex);
391 
392 	return ret;
393 }
394 
395 static
396 int cdns_mhdp_get_edid_block(void *data, u8 *edid,
397 			     unsigned int block, size_t length)
398 {
399 	struct cdns_mhdp_device *mhdp = data;
400 	u8 msg[2], reg[2], i;
401 	int ret;
402 
403 	mutex_lock(&mhdp->mbox_mutex);
404 
405 	for (i = 0; i < 4; i++) {
406 		msg[0] = block / 2;
407 		msg[1] = block % 2;
408 
409 		ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
410 					     DPTX_GET_EDID, sizeof(msg), msg);
411 		if (ret)
412 			continue;
413 
414 		ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
415 						    DPTX_GET_EDID,
416 						    sizeof(reg) + length);
417 		if (ret)
418 			continue;
419 
420 		ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
421 		if (ret)
422 			continue;
423 
424 		ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
425 		if (ret)
426 			continue;
427 
428 		if (reg[0] == length && reg[1] == block / 2)
429 			break;
430 	}
431 
432 	mutex_unlock(&mhdp->mbox_mutex);
433 
434 	if (ret)
435 		dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
436 			block, ret);
437 
438 	return ret;
439 }
440 
441 static
442 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
443 {
444 	u8 event = 0;
445 	int ret;
446 
447 	mutex_lock(&mhdp->mbox_mutex);
448 
449 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
450 				     DPTX_READ_EVENT, 0, NULL);
451 	if (ret)
452 		goto out;
453 
454 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
455 					    DPTX_READ_EVENT, sizeof(event));
456 	if (ret < 0)
457 		goto out;
458 
459 	ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
460 out:
461 	mutex_unlock(&mhdp->mbox_mutex);
462 
463 	if (ret < 0)
464 		return ret;
465 
466 	dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
467 		(event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
468 		(event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
469 		(event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
470 		(event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
471 
472 	return event;
473 }
474 
475 static
476 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
477 			unsigned int udelay, const u8 *lanes_data,
478 			u8 link_status[DP_LINK_STATUS_SIZE])
479 {
480 	u8 payload[7];
481 	u8 hdr[5]; /* For DPCD read response header */
482 	u32 addr;
483 	int ret;
484 
485 	if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
486 		dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
487 		ret = -EINVAL;
488 		goto out;
489 	}
490 
491 	payload[0] = nlanes;
492 	put_unaligned_be16(udelay, payload + 1);
493 	memcpy(payload + 3, lanes_data, nlanes);
494 
495 	mutex_lock(&mhdp->mbox_mutex);
496 
497 	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
498 				     DPTX_ADJUST_LT,
499 				     sizeof(payload), payload);
500 	if (ret)
501 		goto out;
502 
503 	/* Yes, read the DPCD read command response */
504 	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
505 					    DPTX_READ_DPCD,
506 					    sizeof(hdr) + DP_LINK_STATUS_SIZE);
507 	if (ret)
508 		goto out;
509 
510 	ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
511 	if (ret)
512 		goto out;
513 
514 	addr = get_unaligned_be24(hdr + 2);
515 	if (addr != DP_LANE0_1_STATUS)
516 		goto out;
517 
518 	ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
519 					  DP_LINK_STATUS_SIZE);
520 
521 out:
522 	mutex_unlock(&mhdp->mbox_mutex);
523 
524 	if (ret)
525 		dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
526 
527 	return ret;
528 }
529 
530 /**
531  * cdns_mhdp_link_power_up() - power up a DisplayPort link
532  * @aux: DisplayPort AUX channel
533  * @link: pointer to a structure containing the link configuration
534  *
535  * Returns 0 on success or a negative error code on failure.
536  */
537 static
538 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
539 {
540 	u8 value;
541 	int err;
542 
543 	/* DP_SET_POWER register is only available on DPCD v1.1 and later */
544 	if (link->revision < 0x11)
545 		return 0;
546 
547 	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
548 	if (err < 0)
549 		return err;
550 
551 	value &= ~DP_SET_POWER_MASK;
552 	value |= DP_SET_POWER_D0;
553 
554 	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
555 	if (err < 0)
556 		return err;
557 
558 	/*
559 	 * According to the DP 1.1 specification, a "Sink Device must exit the
560 	 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
561 	 * Control Field" (register 0x600).
562 	 */
563 	usleep_range(1000, 2000);
564 
565 	return 0;
566 }
567 
568 /**
569  * cdns_mhdp_link_power_down() - power down a DisplayPort link
570  * @aux: DisplayPort AUX channel
571  * @link: pointer to a structure containing the link configuration
572  *
573  * Returns 0 on success or a negative error code on failure.
574  */
575 static
576 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
577 			      struct cdns_mhdp_link *link)
578 {
579 	u8 value;
580 	int err;
581 
582 	/* DP_SET_POWER register is only available on DPCD v1.1 and later */
583 	if (link->revision < 0x11)
584 		return 0;
585 
586 	err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
587 	if (err < 0)
588 		return err;
589 
590 	value &= ~DP_SET_POWER_MASK;
591 	value |= DP_SET_POWER_D3;
592 
593 	err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
594 	if (err < 0)
595 		return err;
596 
597 	return 0;
598 }
599 
600 /**
601  * cdns_mhdp_link_configure() - configure a DisplayPort link
602  * @aux: DisplayPort AUX channel
603  * @link: pointer to a structure containing the link configuration
604  *
605  * Returns 0 on success or a negative error code on failure.
606  */
607 static
608 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
609 			     struct cdns_mhdp_link *link)
610 {
611 	u8 values[2];
612 	int err;
613 
614 	values[0] = drm_dp_link_rate_to_bw_code(link->rate);
615 	values[1] = link->num_lanes;
616 
617 	if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
618 		values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
619 
620 	err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
621 	if (err < 0)
622 		return err;
623 
624 	return 0;
625 }
626 
627 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
628 {
629 	return min(mhdp->host.link_rate, mhdp->sink.link_rate);
630 }
631 
632 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
633 {
634 	return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
635 }
636 
637 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
638 {
639 	return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
640 }
641 
642 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
643 {
644 	/* Check if SSC is supported by both sides */
645 	return mhdp->host.ssc && mhdp->sink.ssc;
646 }
647 
648 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
649 {
650 	dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
651 
652 	if (mhdp->plugged)
653 		return connector_status_connected;
654 	else
655 		return connector_status_disconnected;
656 }
657 
658 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
659 {
660 	u32 major_num, minor_num, revision;
661 	u32 fw_ver, lib_ver;
662 
663 	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
664 	       | readl(mhdp->regs + CDNS_VER_L);
665 
666 	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
667 		| readl(mhdp->regs + CDNS_LIB_L_ADDR);
668 
669 	if (lib_ver < 33984) {
670 		/*
671 		 * Older FW versions with major number 1, used to store FW
672 		 * version information by storing repository revision number
673 		 * in registers. This is for identifying these FW versions.
674 		 */
675 		major_num = 1;
676 		minor_num = 2;
677 		if (fw_ver == 26098) {
678 			revision = 15;
679 		} else if (lib_ver == 0 && fw_ver == 0) {
680 			revision = 17;
681 		} else {
682 			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
683 				fw_ver, lib_ver);
684 			return -ENODEV;
685 		}
686 	} else {
687 		/* To identify newer FW versions with major number 2 onwards. */
688 		major_num = fw_ver / 10000;
689 		minor_num = (fw_ver / 100) % 100;
690 		revision = (fw_ver % 10000) % 100;
691 	}
692 
693 	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
694 		revision);
695 	return 0;
696 }
697 
698 static int cdns_mhdp_fw_activate(const struct firmware *fw,
699 				 struct cdns_mhdp_device *mhdp)
700 {
701 	unsigned int reg;
702 	int ret;
703 
704 	/* Release uCPU reset and stall it. */
705 	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
706 
707 	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
708 
709 	/* Leave debug mode, release stall */
710 	writel(0, mhdp->regs + CDNS_APB_CTRL);
711 
712 	/*
713 	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
714 	 * Updated each sched "tick" (~2ms)
715 	 */
716 	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
717 				 reg & CDNS_KEEP_ALIVE_MASK, 500,
718 				 CDNS_KEEP_ALIVE_TIMEOUT);
719 	if (ret) {
720 		dev_err(mhdp->dev,
721 			"device didn't give any life sign: reg %d\n", reg);
722 		return ret;
723 	}
724 
725 	ret = cdns_mhdp_check_fw_version(mhdp);
726 	if (ret)
727 		return ret;
728 
729 	/* Init events to 0 as it's not cleared by FW at boot but on read */
730 	readl(mhdp->regs + CDNS_SW_EVENT0);
731 	readl(mhdp->regs + CDNS_SW_EVENT1);
732 	readl(mhdp->regs + CDNS_SW_EVENT2);
733 	readl(mhdp->regs + CDNS_SW_EVENT3);
734 
735 	/* Activate uCPU */
736 	ret = cdns_mhdp_set_firmware_active(mhdp, true);
737 	if (ret)
738 		return ret;
739 
740 	spin_lock(&mhdp->start_lock);
741 
742 	mhdp->hw_state = MHDP_HW_READY;
743 
744 	/*
745 	 * Here we must keep the lock while enabling the interrupts
746 	 * since it would otherwise be possible that interrupt enable
747 	 * code is executed after the bridge is detached. The similar
748 	 * situation is not possible in attach()/detach() callbacks
749 	 * since the hw_state changes from MHDP_HW_READY to
750 	 * MHDP_HW_STOPPED happens only due to driver removal when
751 	 * bridge should already be detached.
752 	 */
753 	if (mhdp->bridge_attached)
754 		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
755 		       mhdp->regs + CDNS_APB_INT_MASK);
756 
757 	spin_unlock(&mhdp->start_lock);
758 
759 	wake_up(&mhdp->fw_load_wq);
760 	dev_dbg(mhdp->dev, "DP FW activated\n");
761 
762 	return 0;
763 }
764 
765 static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
766 {
767 	struct cdns_mhdp_device *mhdp = context;
768 	bool bridge_attached;
769 	int ret;
770 
771 	dev_dbg(mhdp->dev, "firmware callback\n");
772 
773 	if (!fw || !fw->data) {
774 		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
775 		return;
776 	}
777 
778 	ret = cdns_mhdp_fw_activate(fw, mhdp);
779 
780 	release_firmware(fw);
781 
782 	if (ret)
783 		return;
784 
785 	/*
786 	 *  XXX how to make sure the bridge is still attached when
787 	 *      calling drm_kms_helper_hotplug_event() after releasing
788 	 *      the lock? We should not hold the spin lock when
789 	 *      calling drm_kms_helper_hotplug_event() since it may
790 	 *      cause a dead lock. FB-dev console calls detect from the
791 	 *      same thread just down the call stack started here.
792 	 */
793 	spin_lock(&mhdp->start_lock);
794 	bridge_attached = mhdp->bridge_attached;
795 	spin_unlock(&mhdp->start_lock);
796 	if (bridge_attached) {
797 		if (mhdp->connector.dev)
798 			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
799 		else
800 			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
801 	}
802 }
803 
804 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
805 {
806 	int ret;
807 
808 	ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
809 				      GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
810 	if (ret) {
811 		dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
812 			FW_NAME, ret);
813 		return ret;
814 	}
815 
816 	return 0;
817 }
818 
819 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
820 				  struct drm_dp_aux_msg *msg)
821 {
822 	struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
823 	int ret;
824 
825 	if (msg->request != DP_AUX_NATIVE_WRITE &&
826 	    msg->request != DP_AUX_NATIVE_READ)
827 		return -EOPNOTSUPP;
828 
829 	if (msg->request == DP_AUX_NATIVE_WRITE) {
830 		const u8 *buf = msg->buffer;
831 		unsigned int i;
832 
833 		for (i = 0; i < msg->size; ++i) {
834 			ret = cdns_mhdp_dpcd_write(mhdp,
835 						   msg->address + i, buf[i]);
836 			if (!ret)
837 				continue;
838 
839 			dev_err(mhdp->dev,
840 				"Failed to write DPCD addr %u\n",
841 				msg->address + i);
842 
843 			return ret;
844 		}
845 	} else {
846 		ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
847 					  msg->buffer, msg->size);
848 		if (ret) {
849 			dev_err(mhdp->dev,
850 				"Failed to read DPCD addr %u\n",
851 				msg->address);
852 
853 			return ret;
854 		}
855 	}
856 
857 	return msg->size;
858 }
859 
860 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
861 {
862 	union phy_configure_opts phy_cfg;
863 	u32 reg32;
864 	int ret;
865 
866 	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
867 			   DP_TRAINING_PATTERN_DISABLE);
868 
869 	/* Reset PHY configuration */
870 	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
871 	if (!mhdp->host.scrambler)
872 		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
873 
874 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
875 
876 	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
877 			    mhdp->sink.enhanced & mhdp->host.enhanced);
878 
879 	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
880 			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
881 
882 	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
883 	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
884 	phy_cfg.dp.lanes = mhdp->link.num_lanes;
885 
886 	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
887 	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
888 
889 	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
890 	phy_cfg.dp.set_lanes = true;
891 	phy_cfg.dp.set_rate = true;
892 	phy_cfg.dp.set_voltages = true;
893 	ret = phy_configure(mhdp->phy,  &phy_cfg);
894 	if (ret) {
895 		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
896 			__func__, ret);
897 		return ret;
898 	}
899 
900 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
901 			    CDNS_PHY_COMMON_CONFIG |
902 			    CDNS_PHY_TRAINING_EN |
903 			    CDNS_PHY_TRAINING_TYPE(1) |
904 			    CDNS_PHY_SCRAMBLER_BYPASS);
905 
906 	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
907 			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
908 
909 	return 0;
910 }
911 
912 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
913 				       u8 link_status[DP_LINK_STATUS_SIZE],
914 				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
915 				       union phy_configure_opts *phy_cfg)
916 {
917 	u8 adjust, max_pre_emph, max_volt_swing;
918 	u8 set_volt, set_pre;
919 	unsigned int i;
920 
921 	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
922 			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
923 	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
924 
925 	for (i = 0; i < mhdp->link.num_lanes; i++) {
926 		/* Check if Voltage swing and pre-emphasis are within limits */
927 		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
928 		set_volt = min(adjust, max_volt_swing);
929 
930 		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
931 		set_pre = min(adjust, max_pre_emph)
932 			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
933 
934 		/*
935 		 * Voltage swing level and pre-emphasis level combination is
936 		 * not allowed: leaving pre-emphasis as-is, and adjusting
937 		 * voltage swing.
938 		 */
939 		if (set_volt + set_pre > 3)
940 			set_volt = 3 - set_pre;
941 
942 		phy_cfg->dp.voltage[i] = set_volt;
943 		lanes_data[i] = set_volt;
944 
945 		if (set_volt == max_volt_swing)
946 			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
947 
948 		phy_cfg->dp.pre[i] = set_pre;
949 		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
950 
951 		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
952 			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
953 	}
954 }
955 
956 static
957 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
958 					  unsigned int lane, u8 volt)
959 {
960 	unsigned int s = ((lane & 1) ?
961 			  DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
962 			  DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
963 	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
964 
965 	link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
966 	link_status[idx] |= volt << s;
967 }
968 
969 static
970 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
971 					       unsigned int lane, u8 pre_emphasis)
972 {
973 	unsigned int s = ((lane & 1) ?
974 			  DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
975 			  DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
976 	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
977 
978 	link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
979 	link_status[idx] |= pre_emphasis << s;
980 }
981 
982 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
983 					  u8 link_status[DP_LINK_STATUS_SIZE])
984 {
985 	u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
986 	u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
987 	unsigned int i;
988 	u8 volt, pre;
989 
990 	for (i = 0; i < mhdp->link.num_lanes; i++) {
991 		volt = drm_dp_get_adjust_request_voltage(link_status, i);
992 		pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
993 		if (volt + pre > 3)
994 			cdns_mhdp_set_adjust_request_voltage(link_status, i,
995 							     3 - pre);
996 		if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
997 			cdns_mhdp_set_adjust_request_voltage(link_status, i,
998 							     max_volt);
999 		if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
1000 			cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
1001 								  i, max_pre);
1002 	}
1003 }
1004 
1005 static void cdns_mhdp_print_lt_status(const char *prefix,
1006 				      struct cdns_mhdp_device *mhdp,
1007 				      union phy_configure_opts *phy_cfg)
1008 {
1009 	char vs[8] = "0/0/0/0";
1010 	char pe[8] = "0/0/0/0";
1011 	unsigned int i;
1012 
1013 	for (i = 0; i < mhdp->link.num_lanes; i++) {
1014 		vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
1015 		pe[i * 2] = '0' + phy_cfg->dp.pre[i];
1016 	}
1017 
1018 	vs[i * 2 - 1] = '\0';
1019 	pe[i * 2 - 1] = '\0';
1020 
1021 	dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1022 		prefix,
1023 		mhdp->link.num_lanes, mhdp->link.rate / 100,
1024 		vs, pe);
1025 }
1026 
1027 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
1028 					       u8 eq_tps,
1029 					       unsigned int training_interval)
1030 {
1031 	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
1032 	u8 link_status[DP_LINK_STATUS_SIZE];
1033 	union phy_configure_opts phy_cfg;
1034 	u32 reg32;
1035 	int ret;
1036 	bool r;
1037 
1038 	dev_dbg(mhdp->dev, "Starting EQ phase\n");
1039 
1040 	/* Enable link training TPS[eq_tps] in PHY */
1041 	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
1042 		CDNS_PHY_TRAINING_TYPE(eq_tps);
1043 	if (eq_tps != 4)
1044 		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1045 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1046 
1047 	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1048 			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
1049 			   CDNS_DP_TRAINING_PATTERN_4);
1050 
1051 	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1052 
1053 	do {
1054 		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1055 					   &phy_cfg);
1056 		phy_cfg.dp.lanes = mhdp->link.num_lanes;
1057 		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1058 		phy_cfg.dp.set_lanes = false;
1059 		phy_cfg.dp.set_rate = false;
1060 		phy_cfg.dp.set_voltages = true;
1061 		ret = phy_configure(mhdp->phy,  &phy_cfg);
1062 		if (ret) {
1063 			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1064 				__func__, ret);
1065 			goto err;
1066 		}
1067 
1068 		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
1069 				    training_interval, lanes_data, link_status);
1070 
1071 		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
1072 		if (!r)
1073 			goto err;
1074 
1075 		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
1076 			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
1077 						  &phy_cfg);
1078 			return true;
1079 		}
1080 
1081 		fail_counter_short++;
1082 
1083 		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
1084 	} while (fail_counter_short < 5);
1085 
1086 err:
1087 	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
1088 
1089 	return false;
1090 }
1091 
1092 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1093 					  u8 link_status[DP_LINK_STATUS_SIZE],
1094 					  u8 *req_volt, u8 *req_pre)
1095 {
1096 	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1097 	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1098 	unsigned int i;
1099 
1100 	for (i = 0; i < mhdp->link.num_lanes; i++) {
1101 		u8 val;
1102 
1103 		val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1104 		      max_volt : req_volt[i];
1105 		cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1106 
1107 		val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1108 		      max_pre : req_pre[i];
1109 		cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
1110 	}
1111 }
1112 
1113 static
1114 void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
1115 			   bool *same_before_adjust, bool *max_swing_reached,
1116 			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
1117 			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
1118 			   u8 *req_pre)
1119 {
1120 	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1121 	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1122 	bool same_pre, same_volt;
1123 	unsigned int i;
1124 	u8 adjust;
1125 
1126 	*same_before_adjust = false;
1127 	*max_swing_reached = false;
1128 	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
1129 
1130 	for (i = 0; i < mhdp->link.num_lanes; i++) {
1131 		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
1132 		req_volt[i] = min(adjust, max_volt);
1133 
1134 		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
1135 		      DP_TRAIN_PRE_EMPHASIS_SHIFT;
1136 		req_pre[i] = min(adjust, max_pre);
1137 
1138 		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
1139 			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1140 		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
1141 			    req_volt[i];
1142 		if (same_pre && same_volt)
1143 			*same_before_adjust = true;
1144 
1145 		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
1146 		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
1147 			*max_swing_reached = true;
1148 			return;
1149 		}
1150 	}
1151 }
1152 
1153 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
1154 {
1155 	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
1156 	fail_counter_short = 0, fail_counter_cr_long = 0;
1157 	u8 link_status[DP_LINK_STATUS_SIZE];
1158 	bool cr_done;
1159 	union phy_configure_opts phy_cfg;
1160 	int ret;
1161 
1162 	dev_dbg(mhdp->dev, "Starting CR phase\n");
1163 
1164 	ret = cdns_mhdp_link_training_init(mhdp);
1165 	if (ret)
1166 		goto err;
1167 
1168 	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1169 
1170 	do {
1171 		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
1172 		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
1173 		bool same_before_adjust, max_swing_reached;
1174 
1175 		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1176 					   &phy_cfg);
1177 		phy_cfg.dp.lanes = mhdp->link.num_lanes;
1178 		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1179 		phy_cfg.dp.set_lanes = false;
1180 		phy_cfg.dp.set_rate = false;
1181 		phy_cfg.dp.set_voltages = true;
1182 		ret = phy_configure(mhdp->phy,  &phy_cfg);
1183 		if (ret) {
1184 			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1185 				__func__, ret);
1186 			goto err;
1187 		}
1188 
1189 		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
1190 				    lanes_data, link_status);
1191 
1192 		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
1193 				      &max_swing_reached, lanes_data,
1194 				      link_status,
1195 				      requested_adjust_volt_swing,
1196 				      requested_adjust_pre_emphasis);
1197 
1198 		if (max_swing_reached) {
1199 			dev_err(mhdp->dev, "CR: max swing reached\n");
1200 			goto err;
1201 		}
1202 
1203 		if (cr_done) {
1204 			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
1205 						  &phy_cfg);
1206 			return true;
1207 		}
1208 
1209 		/* Not all CR_DONE bits set */
1210 		fail_counter_cr_long++;
1211 
1212 		if (same_before_adjust) {
1213 			fail_counter_short++;
1214 			continue;
1215 		}
1216 
1217 		fail_counter_short = 0;
1218 		/*
1219 		 * Voltage swing/pre-emphasis adjust requested
1220 		 * during CR phase
1221 		 */
1222 		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
1223 					      requested_adjust_volt_swing,
1224 					      requested_adjust_pre_emphasis);
1225 	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);
1226 
1227 err:
1228 	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
1229 
1230 	return false;
1231 }
1232 
1233 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1234 {
1235 	switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1236 	case DP_LINK_BW_2_7:
1237 		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1238 		break;
1239 	case DP_LINK_BW_5_4:
1240 		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1241 		break;
1242 	case DP_LINK_BW_8_1:
1243 		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1244 		break;
1245 	}
1246 }
1247 
1248 static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
1249 				   unsigned int training_interval)
1250 {
1251 	u32 reg32;
1252 	const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
1253 	int ret;
1254 
1255 	while (1) {
1256 		if (!cdns_mhdp_link_training_cr(mhdp)) {
1257 			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1258 			    DP_LINK_BW_1_62) {
1259 				dev_dbg(mhdp->dev,
1260 					"Reducing link rate during CR phase\n");
1261 				cdns_mhdp_lower_link_rate(&mhdp->link);
1262 
1263 				continue;
1264 			} else if (mhdp->link.num_lanes > 1) {
1265 				dev_dbg(mhdp->dev,
1266 					"Reducing lanes number during CR phase\n");
1267 				mhdp->link.num_lanes >>= 1;
1268 				mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1269 
1270 				continue;
1271 			}
1272 
1273 			dev_err(mhdp->dev,
1274 				"Link training failed during CR phase\n");
1275 			goto err;
1276 		}
1277 
1278 		if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
1279 						       training_interval))
1280 			break;
1281 
1282 		if (mhdp->link.num_lanes > 1) {
1283 			dev_dbg(mhdp->dev,
1284 				"Reducing lanes number during EQ phase\n");
1285 			mhdp->link.num_lanes >>= 1;
1286 
1287 			continue;
1288 		} else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1289 			   DP_LINK_BW_1_62) {
1290 			dev_dbg(mhdp->dev,
1291 				"Reducing link rate during EQ phase\n");
1292 			cdns_mhdp_lower_link_rate(&mhdp->link);
1293 			mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1294 
1295 			continue;
1296 		}
1297 
1298 		dev_err(mhdp->dev, "Link training failed during EQ phase\n");
1299 		goto err;
1300 	}
1301 
1302 	dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1303 		mhdp->link.num_lanes, mhdp->link.rate / 100);
1304 
1305 	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1306 			   mhdp->host.scrambler ? 0 :
1307 			   DP_LINK_SCRAMBLING_DISABLE);
1308 
1309 	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
1310 	if (ret < 0) {
1311 		dev_err(mhdp->dev,
1312 			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1313 			ret);
1314 		return ret;
1315 	}
1316 	reg32 &= ~GENMASK(1, 0);
1317 	reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
1318 	reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
1319 	reg32 |= CDNS_DP_FRAMER_EN;
1320 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
1321 
1322 	/* Reset PHY config */
1323 	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1324 	if (!mhdp->host.scrambler)
1325 		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1326 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1327 
1328 	return 0;
1329 err:
1330 	/* Reset PHY config */
1331 	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1332 	if (!mhdp->host.scrambler)
1333 		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1334 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1335 
1336 	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1337 			   DP_TRAINING_PATTERN_DISABLE);
1338 
1339 	return -EIO;
1340 }
1341 
1342 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1343 					      u32 interval)
1344 {
1345 	if (interval == 0)
1346 		return 400;
1347 	if (interval < 5)
1348 		return 4000 << (interval - 1);
1349 	dev_err(mhdp->dev,
1350 		"wrong training interval returned by DPCD: %d\n", interval);
1351 	return 0;
1352 }
1353 
1354 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1355 {
1356 	unsigned int link_rate;
1357 
1358 	/* Get source capabilities based on PHY attributes */
1359 
1360 	mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1361 	if (!mhdp->host.lanes_cnt)
1362 		mhdp->host.lanes_cnt = 4;
1363 
1364 	link_rate = mhdp->phy->attrs.max_link_rate;
1365 	if (!link_rate)
1366 		link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1367 	else
1368 		/* PHY uses Mb/s, DRM uses tens of kb/s. */
1369 		link_rate *= 100;
1370 
1371 	mhdp->host.link_rate = link_rate;
1372 	mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1373 	mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1374 	mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1375 				  CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1376 				  CDNS_SUPPORT_TPS(4);
1377 	mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1378 	mhdp->host.fast_link = false;
1379 	mhdp->host.enhanced = true;
1380 	mhdp->host.scrambler = true;
1381 	mhdp->host.ssc = false;
1382 }
1383 
1384 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1385 				     u8 dpcd[DP_RECEIVER_CAP_SIZE])
1386 {
1387 	mhdp->sink.link_rate = mhdp->link.rate;
1388 	mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1389 	mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1390 				 DP_LINK_CAP_ENHANCED_FRAMING);
1391 
1392 	/* Set SSC support */
1393 	mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1394 				  DP_MAX_DOWNSPREAD_0_5);
1395 
1396 	/* Set TPS support */
1397 	mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1398 	if (drm_dp_tps3_supported(dpcd))
1399 		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1400 	if (drm_dp_tps4_supported(dpcd))
1401 		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1402 
1403 	/* Set fast link support */
1404 	mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1405 				  DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
1406 }
1407 
1408 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1409 {
1410 	u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1411 	u32 resp, interval, interval_us;
1412 	u8 ext_cap_chk = 0;
1413 	unsigned int addr;
1414 	int err;
1415 
1416 	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1417 
1418 	drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1419 			  &ext_cap_chk);
1420 
1421 	if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1422 		addr = DP_DP13_DPCD_REV;
1423 	else
1424 		addr = DP_DPCD_REV;
1425 
1426 	err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1427 	if (err < 0) {
1428 		dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1429 		return err;
1430 	}
1431 
1432 	mhdp->link.revision = dpcd[0];
1433 	mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1434 	mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1435 
1436 	if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1437 		mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1438 
1439 	dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1440 	cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1441 
1442 	cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1443 
1444 	mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1445 	mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1446 
1447 	/* Disable framer for link training */
1448 	err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1449 	if (err < 0) {
1450 		dev_err(mhdp->dev,
1451 			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1452 			err);
1453 		return err;
1454 	}
1455 
1456 	resp &= ~CDNS_DP_FRAMER_EN;
1457 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1458 
1459 	/* Spread AMP if required, enable 8b/10b coding */
1460 	amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1461 	amp[1] = DP_SET_ANSI_8B10B;
1462 	drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1463 
1464 	if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1465 		dev_err(mhdp->dev, "fastlink not supported\n");
1466 		return -EOPNOTSUPP;
1467 	}
1468 
1469 	interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1470 	interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1471 	if (!interval_us ||
1472 	    cdns_mhdp_link_training(mhdp, interval_us)) {
1473 		dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1474 		return -EIO;
1475 	}
1476 
1477 	mhdp->link_up = true;
1478 
1479 	return 0;
1480 }
1481 
1482 static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
1483 {
1484 	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1485 
1486 	if (mhdp->plugged)
1487 		cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
1488 
1489 	mhdp->link_up = false;
1490 }
1491 
1492 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1493 				       struct drm_connector *connector)
1494 {
1495 	if (!mhdp->plugged)
1496 		return NULL;
1497 
1498 	return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
1499 }
1500 
1501 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1502 {
1503 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1504 	struct edid *edid;
1505 	int num_modes;
1506 
1507 	if (!mhdp->plugged)
1508 		return 0;
1509 
1510 	edid = cdns_mhdp_get_edid(mhdp, connector);
1511 	if (!edid) {
1512 		dev_err(mhdp->dev, "Failed to read EDID\n");
1513 		return 0;
1514 	}
1515 
1516 	drm_connector_update_edid_property(connector, edid);
1517 	num_modes = drm_add_edid_modes(connector, edid);
1518 	kfree(edid);
1519 
1520 	/*
1521 	 * HACK: Warn about unsupported display formats until we deal
1522 	 *       with them correctly.
1523 	 */
1524 	if (connector->display_info.color_formats &&
1525 	    !(connector->display_info.color_formats &
1526 	      mhdp->display_fmt.color_format))
1527 		dev_warn(mhdp->dev,
1528 			 "%s: No supported color_format found (0x%08x)\n",
1529 			__func__, connector->display_info.color_formats);
1530 
1531 	if (connector->display_info.bpc &&
1532 	    connector->display_info.bpc < mhdp->display_fmt.bpc)
1533 		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1534 			 __func__, connector->display_info.bpc,
1535 			 mhdp->display_fmt.bpc);
1536 
1537 	return num_modes;
1538 }
1539 
1540 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1541 				      struct drm_modeset_acquire_ctx *ctx,
1542 				      bool force)
1543 {
1544 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1545 
1546 	return cdns_mhdp_detect(mhdp);
1547 }
1548 
1549 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1550 {
1551 	u32 bpp;
1552 
1553 	if (fmt->y_only)
1554 		return fmt->bpc;
1555 
1556 	switch (fmt->color_format) {
1557 	case DRM_COLOR_FORMAT_RGB444:
1558 	case DRM_COLOR_FORMAT_YCBCR444:
1559 		bpp = fmt->bpc * 3;
1560 		break;
1561 	case DRM_COLOR_FORMAT_YCBCR422:
1562 		bpp = fmt->bpc * 2;
1563 		break;
1564 	case DRM_COLOR_FORMAT_YCBCR420:
1565 		bpp = fmt->bpc * 3 / 2;
1566 		break;
1567 	default:
1568 		bpp = fmt->bpc * 3;
1569 		WARN_ON(1);
1570 	}
1571 	return bpp;
1572 }
1573 
1574 static
1575 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1576 			    const struct drm_display_mode *mode,
1577 			    unsigned int lanes, unsigned int rate)
1578 {
1579 	u32 max_bw, req_bw, bpp;
1580 
1581 	/*
1582 	 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1583 	 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1584 	 * value thus equals the bandwidth in 10kb/s units, which matches the
1585 	 * units of the rate parameter.
1586 	 */
1587 
1588 	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1589 	req_bw = mode->clock * bpp / 8;
1590 	max_bw = lanes * rate;
1591 	if (req_bw > max_bw) {
1592 		dev_dbg(mhdp->dev,
1593 			"Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1594 			mode->name, req_bw, max_bw);
1595 
1596 		return false;
1597 	}
1598 
1599 	return true;
1600 }
1601 
1602 static
1603 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1604 					  struct drm_display_mode *mode)
1605 {
1606 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1607 
1608 	mutex_lock(&mhdp->link_mutex);
1609 
1610 	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1611 				    mhdp->link.rate)) {
1612 		mutex_unlock(&mhdp->link_mutex);
1613 		return MODE_CLOCK_HIGH;
1614 	}
1615 
1616 	mutex_unlock(&mhdp->link_mutex);
1617 	return MODE_OK;
1618 }
1619 
1620 static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
1621 					    struct drm_atomic_state *state)
1622 {
1623 	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1624 	struct drm_connector_state *old_state, *new_state;
1625 	struct drm_crtc_state *crtc_state;
1626 	u64 old_cp, new_cp;
1627 
1628 	if (!mhdp->hdcp_supported)
1629 		return 0;
1630 
1631 	old_state = drm_atomic_get_old_connector_state(state, conn);
1632 	new_state = drm_atomic_get_new_connector_state(state, conn);
1633 	old_cp = old_state->content_protection;
1634 	new_cp = new_state->content_protection;
1635 
1636 	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
1637 	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1638 		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1639 		goto mode_changed;
1640 	}
1641 
1642 	if (!new_state->crtc) {
1643 		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1644 			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1645 		return 0;
1646 	}
1647 
1648 	if (old_cp == new_cp ||
1649 	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
1650 	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
1651 		return 0;
1652 
1653 mode_changed:
1654 	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
1655 	crtc_state->mode_changed = true;
1656 
1657 	return 0;
1658 }
1659 
1660 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
1661 	.detect_ctx = cdns_mhdp_connector_detect,
1662 	.get_modes = cdns_mhdp_get_modes,
1663 	.mode_valid = cdns_mhdp_mode_valid,
1664 	.atomic_check = cdns_mhdp_connector_atomic_check,
1665 };
1666 
1667 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
1668 	.fill_modes = drm_helper_probe_single_connector_modes,
1669 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1670 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1671 	.reset = drm_atomic_helper_connector_reset,
1672 	.destroy = drm_connector_cleanup,
1673 };
1674 
1675 static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
1676 {
1677 	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
1678 	struct drm_connector *conn = &mhdp->connector;
1679 	struct drm_bridge *bridge = &mhdp->bridge;
1680 	int ret;
1681 
1682 	if (!bridge->encoder) {
1683 		dev_err(mhdp->dev, "Parent encoder object not found");
1684 		return -ENODEV;
1685 	}
1686 
1687 	conn->polled = DRM_CONNECTOR_POLL_HPD;
1688 
1689 	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
1690 				 DRM_MODE_CONNECTOR_DisplayPort);
1691 	if (ret) {
1692 		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
1693 		return ret;
1694 	}
1695 
1696 	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
1697 
1698 	ret = drm_display_info_set_bus_formats(&conn->display_info,
1699 					       &bus_format, 1);
1700 	if (ret)
1701 		return ret;
1702 
1703 	ret = drm_connector_attach_encoder(conn, bridge->encoder);
1704 	if (ret) {
1705 		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
1706 		return ret;
1707 	}
1708 
1709 	if (mhdp->hdcp_supported)
1710 		ret = drm_connector_attach_content_protection_property(conn, true);
1711 
1712 	return ret;
1713 }
1714 
1715 static int cdns_mhdp_attach(struct drm_bridge *bridge,
1716 			    enum drm_bridge_attach_flags flags)
1717 {
1718 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1719 	bool hw_ready;
1720 	int ret;
1721 
1722 	dev_dbg(mhdp->dev, "%s\n", __func__);
1723 
1724 	mhdp->aux.drm_dev = bridge->dev;
1725 	ret = drm_dp_aux_register(&mhdp->aux);
1726 	if (ret < 0)
1727 		return ret;
1728 
1729 	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
1730 		ret = cdns_mhdp_connector_init(mhdp);
1731 		if (ret)
1732 			goto aux_unregister;
1733 	}
1734 
1735 	spin_lock(&mhdp->start_lock);
1736 
1737 	mhdp->bridge_attached = true;
1738 	hw_ready = mhdp->hw_state == MHDP_HW_READY;
1739 
1740 	spin_unlock(&mhdp->start_lock);
1741 
1742 	/* Enable SW event interrupts */
1743 	if (hw_ready)
1744 		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
1745 		       mhdp->regs + CDNS_APB_INT_MASK);
1746 
1747 	return 0;
1748 aux_unregister:
1749 	drm_dp_aux_unregister(&mhdp->aux);
1750 	return ret;
1751 }
1752 
1753 static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
1754 				      const struct drm_display_mode *mode)
1755 {
1756 	unsigned int dp_framer_sp = 0, msa_horizontal_1,
1757 		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
1758 		misc0 = 0, misc1 = 0, pxl_repr,
1759 		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
1760 		dp_vertical_1;
1761 	u8 stream_id = mhdp->stream_id;
1762 	u32 bpp, bpc, pxlfmt, framer;
1763 	int ret;
1764 
1765 	pxlfmt = mhdp->display_fmt.color_format;
1766 	bpc = mhdp->display_fmt.bpc;
1767 
1768 	/*
1769 	 * If YCBCR supported and stream not SD, use ITU709
1770 	 * Need to handle ITU version with YCBCR420 when supported
1771 	 */
1772 	if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
1773 	     pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
1774 		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
1775 
1776 	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1777 
1778 	switch (pxlfmt) {
1779 	case DRM_COLOR_FORMAT_RGB444:
1780 		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
1781 		misc0 |= DP_COLOR_FORMAT_RGB;
1782 		break;
1783 	case DRM_COLOR_FORMAT_YCBCR444:
1784 		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
1785 		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
1786 		break;
1787 	case DRM_COLOR_FORMAT_YCBCR422:
1788 		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
1789 		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
1790 		break;
1791 	case DRM_COLOR_FORMAT_YCBCR420:
1792 		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
1793 		break;
1794 	default:
1795 		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
1796 	}
1797 
1798 	switch (bpc) {
1799 	case 6:
1800 		misc0 |= DP_TEST_BIT_DEPTH_6;
1801 		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
1802 		break;
1803 	case 8:
1804 		misc0 |= DP_TEST_BIT_DEPTH_8;
1805 		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
1806 		break;
1807 	case 10:
1808 		misc0 |= DP_TEST_BIT_DEPTH_10;
1809 		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
1810 		break;
1811 	case 12:
1812 		misc0 |= DP_TEST_BIT_DEPTH_12;
1813 		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
1814 		break;
1815 	case 16:
1816 		misc0 |= DP_TEST_BIT_DEPTH_16;
1817 		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
1818 		break;
1819 	}
1820 
1821 	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
1822 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1823 		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
1824 
1825 	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
1826 			    bnd_hsync2vsync);
1827 
1828 	hsync2vsync_pol_ctrl = 0;
1829 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1830 		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
1831 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1832 		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
1833 	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
1834 			    hsync2vsync_pol_ctrl);
1835 
1836 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
1837 
1838 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1839 		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
1840 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1841 		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
1842 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1843 		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
1844 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
1845 
1846 	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
1847 	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
1848 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
1849 			    CDNS_DP_FRONT_PORCH(front_porch) |
1850 			    CDNS_DP_BACK_PORCH(back_porch));
1851 
1852 	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
1853 			    mode->crtc_hdisplay * bpp / 8);
1854 
1855 	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
1856 	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
1857 			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
1858 			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));
1859 
1860 	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
1861 	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
1862 			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
1863 	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1864 		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
1865 	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
1866 			    msa_horizontal_1);
1867 
1868 	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
1869 	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
1870 			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
1871 			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));
1872 
1873 	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
1874 	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
1875 			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
1876 	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1877 		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
1878 	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
1879 			    msa_vertical_1);
1880 
1881 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1882 	    mode->crtc_vtotal % 2 == 0)
1883 		misc1 = DP_TEST_INTERLACED;
1884 	if (mhdp->display_fmt.y_only)
1885 		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
1886 	/* Use VSC SDP for Y420 */
1887 	if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
1888 		misc1 = CDNS_DP_TEST_VSC_SDP;
1889 
1890 	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
1891 			    misc0 | (misc1 << 8));
1892 
1893 	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
1894 			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
1895 			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
1896 
1897 	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
1898 			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
1899 			    CDNS_DP_V0_VSTART(msa_v0));
1900 
1901 	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
1902 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1903 	    mode->crtc_vtotal % 2 == 0)
1904 		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
1905 
1906 	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
1907 
1908 	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
1909 				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
1910 				CDNS_DP_VB_ID_INTERLACED : 0);
1911 
1912 	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
1913 	if (ret < 0) {
1914 		dev_err(mhdp->dev,
1915 			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1916 			ret);
1917 		return;
1918 	}
1919 	framer |= CDNS_DP_FRAMER_EN;
1920 	framer &= ~CDNS_DP_NO_VIDEO_MODE;
1921 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
1922 }
1923 
1924 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
1925 				 const struct drm_display_mode *mode)
1926 {
1927 	u32 rate, vs, required_bandwidth, available_bandwidth;
1928 	s32 line_thresh1, line_thresh2, line_thresh = 0;
1929 	int pxlclock = mode->crtc_clock;
1930 	u32 tu_size = 64;
1931 	u32 bpp;
1932 
1933 	/* Get rate in MSymbols per second per lane */
1934 	rate = mhdp->link.rate / 1000;
1935 
1936 	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1937 
1938 	required_bandwidth = pxlclock * bpp / 8;
1939 	available_bandwidth = mhdp->link.num_lanes * rate;
1940 
1941 	vs = tu_size * required_bandwidth / available_bandwidth;
1942 	vs /= 1000;
1943 
1944 	if (vs == tu_size)
1945 		vs = tu_size - 1;
1946 
1947 	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
1948 	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
1949 	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
1950 	line_thresh = (line_thresh >> 5) + 2;
1951 
1952 	mhdp->stream_id = 0;
1953 
1954 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
1955 			    CDNS_DP_FRAMER_TU_VS(vs) |
1956 			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
1957 			    CDNS_DP_FRAMER_TU_CNT_RST_EN);
1958 
1959 	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
1960 			    line_thresh & GENMASK(5, 0));
1961 
1962 	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
1963 			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
1964 						   0 : tu_size - vs));
1965 
1966 	cdns_mhdp_configure_video(mhdp, mode);
1967 }
1968 
1969 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
1970 				    struct drm_bridge_state *bridge_state)
1971 {
1972 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1973 	struct drm_atomic_state *state = bridge_state->base.state;
1974 	struct cdns_mhdp_bridge_state *mhdp_state;
1975 	struct drm_crtc_state *crtc_state;
1976 	struct drm_connector *connector;
1977 	struct drm_connector_state *conn_state;
1978 	struct drm_bridge_state *new_state;
1979 	const struct drm_display_mode *mode;
1980 	u32 resp;
1981 	int ret;
1982 
1983 	dev_dbg(mhdp->dev, "bridge enable\n");
1984 
1985 	mutex_lock(&mhdp->link_mutex);
1986 
1987 	if (mhdp->plugged && !mhdp->link_up) {
1988 		ret = cdns_mhdp_link_up(mhdp);
1989 		if (ret < 0)
1990 			goto out;
1991 	}
1992 
1993 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
1994 		mhdp->info->ops->enable(mhdp);
1995 
1996 	/* Enable VIF clock for stream 0 */
1997 	ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
1998 	if (ret < 0) {
1999 		dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
2000 		goto out;
2001 	}
2002 
2003 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2004 			    resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
2005 
2006 	connector = drm_atomic_get_new_connector_for_encoder(state,
2007 							     bridge->encoder);
2008 	if (WARN_ON(!connector))
2009 		goto out;
2010 
2011 	conn_state = drm_atomic_get_new_connector_state(state, connector);
2012 	if (WARN_ON(!conn_state))
2013 		goto out;
2014 
2015 	if (mhdp->hdcp_supported &&
2016 	    mhdp->hw_state == MHDP_HW_READY &&
2017 	    conn_state->content_protection ==
2018 	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2019 		mutex_unlock(&mhdp->link_mutex);
2020 		cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
2021 		mutex_lock(&mhdp->link_mutex);
2022 	}
2023 
2024 	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
2025 	if (WARN_ON(!crtc_state))
2026 		goto out;
2027 
2028 	mode = &crtc_state->adjusted_mode;
2029 
2030 	new_state = drm_atomic_get_new_bridge_state(state, bridge);
2031 	if (WARN_ON(!new_state))
2032 		goto out;
2033 
2034 	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2035 				    mhdp->link.rate)) {
2036 		ret = -EINVAL;
2037 		goto out;
2038 	}
2039 
2040 	cdns_mhdp_sst_enable(mhdp, mode);
2041 
2042 	mhdp_state = to_cdns_mhdp_bridge_state(new_state);
2043 
2044 	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
2045 	drm_mode_set_name(mhdp_state->current_mode);
2046 
2047 	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
2048 
2049 	mhdp->bridge_enabled = true;
2050 
2051 out:
2052 	mutex_unlock(&mhdp->link_mutex);
2053 	if (ret < 0)
2054 		schedule_work(&mhdp->modeset_retry_work);
2055 }
2056 
2057 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
2058 				     struct drm_bridge_state *bridge_state)
2059 {
2060 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2061 	u32 resp;
2062 
2063 	dev_dbg(mhdp->dev, "%s\n", __func__);
2064 
2065 	mutex_lock(&mhdp->link_mutex);
2066 
2067 	if (mhdp->hdcp_supported)
2068 		cdns_mhdp_hdcp_disable(mhdp);
2069 
2070 	mhdp->bridge_enabled = false;
2071 	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
2072 	resp &= ~CDNS_DP_FRAMER_EN;
2073 	resp |= CDNS_DP_NO_VIDEO_MODE;
2074 	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
2075 
2076 	cdns_mhdp_link_down(mhdp);
2077 
2078 	/* Disable VIF clock for stream 0 */
2079 	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2080 	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2081 			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
2082 
2083 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
2084 		mhdp->info->ops->disable(mhdp);
2085 
2086 	mutex_unlock(&mhdp->link_mutex);
2087 }
2088 
2089 static void cdns_mhdp_detach(struct drm_bridge *bridge)
2090 {
2091 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2092 
2093 	dev_dbg(mhdp->dev, "%s\n", __func__);
2094 
2095 	drm_dp_aux_unregister(&mhdp->aux);
2096 
2097 	spin_lock(&mhdp->start_lock);
2098 
2099 	mhdp->bridge_attached = false;
2100 
2101 	spin_unlock(&mhdp->start_lock);
2102 
2103 	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2104 }
2105 
2106 static struct drm_bridge_state *
2107 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2108 {
2109 	struct cdns_mhdp_bridge_state *state;
2110 
2111 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2112 	if (!state)
2113 		return NULL;
2114 
2115 	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2116 
2117 	return &state->base;
2118 }
2119 
2120 static void
2121 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2122 				      struct drm_bridge_state *state)
2123 {
2124 	struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2125 
2126 	cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2127 
2128 	if (cdns_mhdp_state->current_mode) {
2129 		drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2130 		cdns_mhdp_state->current_mode = NULL;
2131 	}
2132 
2133 	kfree(cdns_mhdp_state);
2134 }
2135 
2136 static struct drm_bridge_state *
2137 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2138 {
2139 	struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2140 
2141 	cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2142 	if (!cdns_mhdp_state)
2143 		return NULL;
2144 
2145 	__drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2146 
2147 	return &cdns_mhdp_state->base;
2148 }
2149 
2150 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2151 				  struct drm_bridge_state *bridge_state,
2152 				  struct drm_crtc_state *crtc_state,
2153 				  struct drm_connector_state *conn_state)
2154 {
2155 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2156 	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2157 
2158 	mutex_lock(&mhdp->link_mutex);
2159 
2160 	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2161 				    mhdp->link.rate)) {
2162 		dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2163 			__func__, mode->name, mhdp->link.num_lanes,
2164 			mhdp->link.rate / 100);
2165 		mutex_unlock(&mhdp->link_mutex);
2166 		return -EINVAL;
2167 	}
2168 
2169 	mutex_unlock(&mhdp->link_mutex);
2170 	return 0;
2171 }
2172 
2173 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2174 {
2175 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2176 
2177 	return cdns_mhdp_detect(mhdp);
2178 }
2179 
2180 static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
2181 					      struct drm_connector *connector)
2182 {
2183 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2184 
2185 	return cdns_mhdp_get_edid(mhdp, connector);
2186 }
2187 
2188 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
2189 {
2190 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2191 
2192 	/* Enable SW event interrupts */
2193 	if (mhdp->bridge_attached)
2194 		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
2195 		       mhdp->regs + CDNS_APB_INT_MASK);
2196 }
2197 
2198 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
2199 {
2200 	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2201 
2202 	writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
2203 }
2204 
2205 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
2206 	.atomic_enable = cdns_mhdp_atomic_enable,
2207 	.atomic_disable = cdns_mhdp_atomic_disable,
2208 	.atomic_check = cdns_mhdp_atomic_check,
2209 	.attach = cdns_mhdp_attach,
2210 	.detach = cdns_mhdp_detach,
2211 	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
2212 	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
2213 	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
2214 	.detect = cdns_mhdp_bridge_detect,
2215 	.get_edid = cdns_mhdp_bridge_get_edid,
2216 	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
2217 	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
2218 };
2219 
2220 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2221 {
2222 	int hpd_event, hpd_status;
2223 
2224 	*hpd_pulse = false;
2225 
2226 	hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2227 
2228 	/* Getting event bits failed, bail out */
2229 	if (hpd_event < 0) {
2230 		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2231 			 __func__, hpd_event);
2232 		return false;
2233 	}
2234 
2235 	hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2236 	if (hpd_status < 0) {
2237 		dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2238 			 __func__, hpd_status);
2239 		return false;
2240 	}
2241 
2242 	if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2243 		*hpd_pulse = true;
2244 
2245 	return !!hpd_status;
2246 }
2247 
2248 static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
2249 {
2250 	struct cdns_mhdp_bridge_state *cdns_bridge_state;
2251 	struct drm_display_mode *current_mode;
2252 	bool old_plugged = mhdp->plugged;
2253 	struct drm_bridge_state *state;
2254 	u8 status[DP_LINK_STATUS_SIZE];
2255 	bool hpd_pulse;
2256 	int ret = 0;
2257 
2258 	mutex_lock(&mhdp->link_mutex);
2259 
2260 	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
2261 
2262 	if (!mhdp->plugged) {
2263 		cdns_mhdp_link_down(mhdp);
2264 		mhdp->link.rate = mhdp->host.link_rate;
2265 		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2266 		goto out;
2267 	}
2268 
2269 	/*
2270 	 * If we get a HPD pulse event and we were and still are connected,
2271 	 * check the link status. If link status is ok, there's nothing to do
2272 	 * as we don't handle DP interrupts. If link status is bad, continue
2273 	 * with full link setup.
2274 	 */
2275 	if (hpd_pulse && old_plugged == mhdp->plugged) {
2276 		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
2277 
2278 		/*
2279 		 * If everything looks fine, just return, as we don't handle
2280 		 * DP IRQs.
2281 		 */
2282 		if (ret > 0 &&
2283 		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
2284 		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
2285 			goto out;
2286 
2287 		/* If link is bad, mark link as down so that we do a new LT */
2288 		mhdp->link_up = false;
2289 	}
2290 
2291 	if (!mhdp->link_up) {
2292 		ret = cdns_mhdp_link_up(mhdp);
2293 		if (ret < 0)
2294 			goto out;
2295 	}
2296 
2297 	if (mhdp->bridge_enabled) {
2298 		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
2299 		if (!state) {
2300 			ret = -EINVAL;
2301 			goto out;
2302 		}
2303 
2304 		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
2305 		if (!cdns_bridge_state) {
2306 			ret = -EINVAL;
2307 			goto out;
2308 		}
2309 
2310 		current_mode = cdns_bridge_state->current_mode;
2311 		if (!current_mode) {
2312 			ret = -EINVAL;
2313 			goto out;
2314 		}
2315 
2316 		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
2317 					    mhdp->link.rate)) {
2318 			ret = -EINVAL;
2319 			goto out;
2320 		}
2321 
2322 		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
2323 			current_mode->name);
2324 
2325 		cdns_mhdp_sst_enable(mhdp, current_mode);
2326 	}
2327 out:
2328 	mutex_unlock(&mhdp->link_mutex);
2329 	return ret;
2330 }
2331 
2332 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2333 {
2334 	struct cdns_mhdp_device *mhdp;
2335 	struct drm_connector *conn;
2336 
2337 	mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2338 
2339 	conn = &mhdp->connector;
2340 
2341 	/* Grab the locks before changing connector property */
2342 	mutex_lock(&conn->dev->mode_config.mutex);
2343 
2344 	/*
2345 	 * Set connector link status to BAD and send a Uevent to notify
2346 	 * userspace to do a modeset.
2347 	 */
2348 	drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2349 	mutex_unlock(&conn->dev->mode_config.mutex);
2350 
2351 	/* Send Hotplug uevent so userspace can reprobe */
2352 	drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2353 }
2354 
2355 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2356 {
2357 	struct cdns_mhdp_device *mhdp = data;
2358 	u32 apb_stat, sw_ev0;
2359 	bool bridge_attached;
2360 
2361 	apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2362 	if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2363 		return IRQ_NONE;
2364 
2365 	sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2366 
2367 	/*
2368 	 *  Calling drm_kms_helper_hotplug_event() when not attached
2369 	 *  to drm device causes an oops because the drm_bridge->dev
2370 	 *  is NULL. See cdns_mhdp_fw_cb() comments for details about the
2371 	 *  problems related drm_kms_helper_hotplug_event() call.
2372 	 */
2373 	spin_lock(&mhdp->start_lock);
2374 	bridge_attached = mhdp->bridge_attached;
2375 	spin_unlock(&mhdp->start_lock);
2376 
2377 	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2378 		schedule_work(&mhdp->hpd_work);
2379 	}
2380 
2381 	if (sw_ev0 & ~CDNS_DPTX_HPD) {
2382 		mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2383 		wake_up(&mhdp->sw_events_wq);
2384 	}
2385 
2386 	return IRQ_HANDLED;
2387 }
2388 
2389 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2390 {
2391 	u32 ret;
2392 
2393 	ret = wait_event_timeout(mhdp->sw_events_wq,
2394 				 mhdp->sw_events & event,
2395 				 msecs_to_jiffies(500));
2396 	if (!ret) {
2397 		dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2398 		goto sw_event_out;
2399 	}
2400 
2401 	ret = mhdp->sw_events;
2402 	mhdp->sw_events &= ~event;
2403 
2404 sw_event_out:
2405 	return ret;
2406 }
2407 
2408 static void cdns_mhdp_hpd_work(struct work_struct *work)
2409 {
2410 	struct cdns_mhdp_device *mhdp = container_of(work,
2411 						     struct cdns_mhdp_device,
2412 						     hpd_work);
2413 	int ret;
2414 
2415 	ret = cdns_mhdp_update_link_status(mhdp);
2416 	if (mhdp->connector.dev) {
2417 		if (ret < 0)
2418 			schedule_work(&mhdp->modeset_retry_work);
2419 		else
2420 			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2421 	} else {
2422 		drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
2423 	}
2424 }
2425 
2426 static int cdns_mhdp_probe(struct platform_device *pdev)
2427 {
2428 	struct device *dev = &pdev->dev;
2429 	struct cdns_mhdp_device *mhdp;
2430 	unsigned long rate;
2431 	struct clk *clk;
2432 	int ret;
2433 	int irq;
2434 
2435 	mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2436 	if (!mhdp)
2437 		return -ENOMEM;
2438 
2439 	clk = devm_clk_get(dev, NULL);
2440 	if (IS_ERR(clk)) {
2441 		dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2442 		return PTR_ERR(clk);
2443 	}
2444 
2445 	mhdp->clk = clk;
2446 	mhdp->dev = dev;
2447 	mutex_init(&mhdp->mbox_mutex);
2448 	mutex_init(&mhdp->link_mutex);
2449 	spin_lock_init(&mhdp->start_lock);
2450 
2451 	drm_dp_aux_init(&mhdp->aux);
2452 	mhdp->aux.dev = dev;
2453 	mhdp->aux.transfer = cdns_mhdp_transfer;
2454 
2455 	mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2456 	if (IS_ERR(mhdp->regs)) {
2457 		dev_err(dev, "Failed to get memory resource\n");
2458 		return PTR_ERR(mhdp->regs);
2459 	}
2460 
2461 	mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2462 	if (IS_ERR(mhdp->sapb_regs)) {
2463 		mhdp->hdcp_supported = false;
2464 		dev_warn(dev,
2465 			 "Failed to get SAPB memory resource, HDCP not supported\n");
2466 	} else {
2467 		mhdp->hdcp_supported = true;
2468 	}
2469 
2470 	mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2471 	if (IS_ERR(mhdp->phy)) {
2472 		dev_err(dev, "no PHY configured\n");
2473 		return PTR_ERR(mhdp->phy);
2474 	}
2475 
2476 	platform_set_drvdata(pdev, mhdp);
2477 
2478 	mhdp->info = of_device_get_match_data(dev);
2479 
2480 	clk_prepare_enable(clk);
2481 
2482 	pm_runtime_enable(dev);
2483 	ret = pm_runtime_resume_and_get(dev);
2484 	if (ret < 0) {
2485 		dev_err(dev, "pm_runtime_resume_and_get failed\n");
2486 		pm_runtime_disable(dev);
2487 		goto clk_disable;
2488 	}
2489 
2490 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2491 		ret = mhdp->info->ops->init(mhdp);
2492 		if (ret != 0) {
2493 			dev_err(dev, "MHDP platform initialization failed: %d\n",
2494 				ret);
2495 			goto runtime_put;
2496 		}
2497 	}
2498 
2499 	rate = clk_get_rate(clk);
2500 	writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2501 	writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2502 
2503 	dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2504 
2505 	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2506 
2507 	irq = platform_get_irq(pdev, 0);
2508 	ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2509 					cdns_mhdp_irq_handler, IRQF_ONESHOT,
2510 					"mhdp8546", mhdp);
2511 	if (ret) {
2512 		dev_err(dev, "cannot install IRQ %d\n", irq);
2513 		ret = -EIO;
2514 		goto plat_fini;
2515 	}
2516 
2517 	cdns_mhdp_fill_host_caps(mhdp);
2518 
2519 	/* Initialize link rate and num of lanes to host values */
2520 	mhdp->link.rate = mhdp->host.link_rate;
2521 	mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2522 
2523 	/* The only currently supported format */
2524 	mhdp->display_fmt.y_only = false;
2525 	mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2526 	mhdp->display_fmt.bpc = 8;
2527 
2528 	mhdp->bridge.of_node = pdev->dev.of_node;
2529 	mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2530 	mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2531 			   DRM_BRIDGE_OP_HPD;
2532 	mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2533 	if (mhdp->info)
2534 		mhdp->bridge.timings = mhdp->info->timings;
2535 
2536 	ret = phy_init(mhdp->phy);
2537 	if (ret) {
2538 		dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2539 		goto plat_fini;
2540 	}
2541 
2542 	/* Initialize the work for modeset in case of link train failure */
2543 	INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2544 	INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2545 
2546 	init_waitqueue_head(&mhdp->fw_load_wq);
2547 	init_waitqueue_head(&mhdp->sw_events_wq);
2548 
2549 	ret = cdns_mhdp_load_firmware(mhdp);
2550 	if (ret)
2551 		goto phy_exit;
2552 
2553 	if (mhdp->hdcp_supported)
2554 		cdns_mhdp_hdcp_init(mhdp);
2555 
2556 	drm_bridge_add(&mhdp->bridge);
2557 
2558 	return 0;
2559 
2560 phy_exit:
2561 	phy_exit(mhdp->phy);
2562 plat_fini:
2563 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2564 		mhdp->info->ops->exit(mhdp);
2565 runtime_put:
2566 	pm_runtime_put_sync(dev);
2567 	pm_runtime_disable(dev);
2568 clk_disable:
2569 	clk_disable_unprepare(mhdp->clk);
2570 
2571 	return ret;
2572 }
2573 
2574 static int cdns_mhdp_remove(struct platform_device *pdev)
2575 {
2576 	struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
2577 	unsigned long timeout = msecs_to_jiffies(100);
2578 	bool stop_fw = false;
2579 	int ret;
2580 
2581 	drm_bridge_remove(&mhdp->bridge);
2582 
2583 	ret = wait_event_timeout(mhdp->fw_load_wq,
2584 				 mhdp->hw_state == MHDP_HW_READY,
2585 				 timeout);
2586 	if (ret == 0)
2587 		dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2588 			__func__);
2589 	else
2590 		stop_fw = true;
2591 
2592 	spin_lock(&mhdp->start_lock);
2593 	mhdp->hw_state = MHDP_HW_STOPPED;
2594 	spin_unlock(&mhdp->start_lock);
2595 
2596 	if (stop_fw)
2597 		ret = cdns_mhdp_set_firmware_active(mhdp, false);
2598 
2599 	phy_exit(mhdp->phy);
2600 
2601 	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2602 		mhdp->info->ops->exit(mhdp);
2603 
2604 	pm_runtime_put_sync(&pdev->dev);
2605 	pm_runtime_disable(&pdev->dev);
2606 
2607 	cancel_work_sync(&mhdp->modeset_retry_work);
2608 	flush_scheduled_work();
2609 
2610 	clk_disable_unprepare(mhdp->clk);
2611 
2612 	return ret;
2613 }
2614 
2615 static const struct of_device_id mhdp_ids[] = {
2616 	{ .compatible = "cdns,mhdp8546", },
2617 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
2618 	{ .compatible = "ti,j721e-mhdp8546",
2619 	  .data = &(const struct cdns_mhdp_platform_info) {
2620 		  .timings = &mhdp_ti_j721e_bridge_timings,
2621 		  .ops = &mhdp_ti_j721e_ops,
2622 	  },
2623 	},
2624 #endif
2625 	{ /* sentinel */ }
2626 };
2627 MODULE_DEVICE_TABLE(of, mhdp_ids);
2628 
2629 static struct platform_driver mhdp_driver = {
2630 	.driver	= {
2631 		.name		= "cdns-mhdp8546",
2632 		.of_match_table	= of_match_ptr(mhdp_ids),
2633 	},
2634 	.probe	= cdns_mhdp_probe,
2635 	.remove	= cdns_mhdp_remove,
2636 };
2637 module_platform_driver(mhdp_driver);
2638 
2639 MODULE_FIRMWARE(FW_NAME);
2640 
2641 MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
2642 MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
2643 MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
2644 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
2645 MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
2646 MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
2647 MODULE_LICENSE("GPL");
2648 MODULE_ALIAS("platform:cdns-mhdp8546");
2649