xref: /openbmc/linux/drivers/gpu/drm/msm/dsi/dsi_host.c (revision 8ee90c5c)
1 /*
2  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/err.h>
17 #include <linux/gpio.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/interrupt.h>
20 #include <linux/of_device.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_irq.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/of_graph.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/spinlock.h>
27 #include <linux/mfd/syscon.h>
28 #include <linux/regmap.h>
29 #include <video/mipi_display.h>
30 
31 #include "dsi.h"
32 #include "dsi.xml.h"
33 #include "sfpb.xml.h"
34 #include "dsi_cfg.h"
35 #include "msm_kms.h"
36 
37 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
38 {
39 	u32 ver;
40 
41 	if (!major || !minor)
42 		return -EINVAL;
43 
44 	/*
45 	 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
46 	 * makes all other registers 4-byte shifted down.
47 	 *
48 	 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and
49 	 * older, we read the DSI_VERSION register without any shift(offset
50 	 * 0x1f0). In the case of DSIv2, this hast to be a non-zero value. In
51 	 * the case of DSI6G, this has to be zero (the offset points to a
52 	 * scratch register which we never touch)
53 	 */
54 
55 	ver = msm_readl(base + REG_DSI_VERSION);
56 	if (ver) {
57 		/* older dsi host, there is no register shift */
58 		ver = FIELD(ver, DSI_VERSION_MAJOR);
59 		if (ver <= MSM_DSI_VER_MAJOR_V2) {
60 			/* old versions */
61 			*major = ver;
62 			*minor = 0;
63 			return 0;
64 		} else {
65 			return -EINVAL;
66 		}
67 	} else {
68 		/*
69 		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
70 		 * registers are shifted down, read DSI_VERSION again with
71 		 * the shifted offset
72 		 */
73 		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
74 		ver = FIELD(ver, DSI_VERSION_MAJOR);
75 		if (ver == MSM_DSI_VER_MAJOR_6G) {
76 			/* 6G version */
77 			*major = ver;
78 			*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
79 			return 0;
80 		} else {
81 			return -EINVAL;
82 		}
83 	}
84 }
85 
86 #define DSI_ERR_STATE_ACK			0x0000
87 #define DSI_ERR_STATE_TIMEOUT			0x0001
88 #define DSI_ERR_STATE_DLN0_PHY			0x0002
89 #define DSI_ERR_STATE_FIFO			0x0004
90 #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
91 #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
92 #define DSI_ERR_STATE_PLL_UNLOCKED		0x0020
93 
94 #define DSI_CLK_CTRL_ENABLE_CLKS	\
95 		(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
96 		DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
97 		DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
98 		DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
99 
/* Driver state for one MSM DSI host controller instance. */
struct msm_dsi_host {
	struct mipi_dsi_host base;	/* handle registered with the mipi_dsi core */

	struct platform_device *pdev;
	struct drm_device *dev;

	int id;		/* controller instance id */

	void __iomem *ctrl_base;	/* mapped controller register block */
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	/* per-SoC bus (interface) clocks, see dsi_clk_init() */
	struct clk *bus_clks[DSI_BUS_CLK_MAX];

	/* link clocks and their parents */
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;

	u32 byte_clk_rate;	/* Hz, computed in dsi_calc_clk_rate() */
	u32 esc_clk_rate;	/* Hz */

	/* DSI v2 specific clocks */
	struct clk *src_clk;
	struct clk *esc_clk_src;
	struct clk *dsi_clk_src;

	u32 src_clk_rate;	/* Hz, DSI v2 only */

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct msm_dsi_cfg_handler *cfg_hnd;	/* per-version config/ops */

	struct completion dma_comp;	/* signalled on command DMA done */
	struct completion video_comp;	/* signalled on video frame done */
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;	/* DSI_ERR_STATE_* bits pending for err_work */
	struct work_struct err_work;
	struct work_struct hpd_work;
	struct workqueue_struct *workqueue;

	/* DSI 6G TX buffer*/
	struct drm_gem_object *tx_gem_obj;

	/* DSI v2 TX buffer */
	void *tx_buf;
	dma_addr_t tx_buf_paddr;

	int tx_size;	/* usable size of whichever TX buffer is in use */

	u8 *rx_buf;	/* buffer for command read responses */

	struct regmap *sfpb;

	struct drm_display_mode *mode;	/* current mode; NULL until set */

	/* connected device info */
	struct device_node *device_node;
	unsigned int channel;
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;

	/* lane data parsed via DT */
	int dlane_swap;
	int num_data_lanes;

	u32 dma_cmd_ctrl_restore;	/* saved CMD_DMA_CTRL across broadcast */

	bool registered;
	bool power_on;
	int irq;
};
177 
178 static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
179 {
180 	switch (fmt) {
181 	case MIPI_DSI_FMT_RGB565:		return 16;
182 	case MIPI_DSI_FMT_RGB666_PACKED:	return 18;
183 	case MIPI_DSI_FMT_RGB666:
184 	case MIPI_DSI_FMT_RGB888:
185 	default:				return 24;
186 	}
187 }
188 
/* Read a 32-bit host controller register at offset @reg from ctrl_base. */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + reg);
}
/* Write @data to the host controller register at offset @reg. */
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + reg);
}
197 
198 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
199 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
200 
201 static const struct msm_dsi_cfg_handler *dsi_get_config(
202 						struct msm_dsi_host *msm_host)
203 {
204 	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
205 	struct device *dev = &msm_host->pdev->dev;
206 	struct regulator *gdsc_reg;
207 	struct clk *ahb_clk;
208 	int ret;
209 	u32 major = 0, minor = 0;
210 
211 	gdsc_reg = regulator_get(dev, "gdsc");
212 	if (IS_ERR(gdsc_reg)) {
213 		pr_err("%s: cannot get gdsc\n", __func__);
214 		goto exit;
215 	}
216 
217 	ahb_clk = clk_get(dev, "iface_clk");
218 	if (IS_ERR(ahb_clk)) {
219 		pr_err("%s: cannot get interface clock\n", __func__);
220 		goto put_gdsc;
221 	}
222 
223 	pm_runtime_get_sync(dev);
224 
225 	ret = regulator_enable(gdsc_reg);
226 	if (ret) {
227 		pr_err("%s: unable to enable gdsc\n", __func__);
228 		goto put_clk;
229 	}
230 
231 	ret = clk_prepare_enable(ahb_clk);
232 	if (ret) {
233 		pr_err("%s: unable to enable ahb_clk\n", __func__);
234 		goto disable_gdsc;
235 	}
236 
237 	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
238 	if (ret) {
239 		pr_err("%s: Invalid version\n", __func__);
240 		goto disable_clks;
241 	}
242 
243 	cfg_hnd = msm_dsi_cfg_get(major, minor);
244 
245 	DBG("%s: Version %x:%x\n", __func__, major, minor);
246 
247 disable_clks:
248 	clk_disable_unprepare(ahb_clk);
249 disable_gdsc:
250 	regulator_disable(gdsc_reg);
251 	pm_runtime_put_sync(dev);
252 put_clk:
253 	clk_put(ahb_clk);
254 put_gdsc:
255 	regulator_put(gdsc_reg);
256 exit:
257 	return cfg_hnd;
258 }
259 
/* Recover our msm_dsi_host from the embedded mipi_dsi_host member. */
static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}
264 
265 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
266 {
267 	struct regulator_bulk_data *s = msm_host->supplies;
268 	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
269 	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
270 	int i;
271 
272 	DBG("");
273 	for (i = num - 1; i >= 0; i--)
274 		if (regs[i].disable_load >= 0)
275 			regulator_set_load(s[i].consumer,
276 					   regs[i].disable_load);
277 
278 	regulator_bulk_disable(num, s);
279 }
280 
281 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
282 {
283 	struct regulator_bulk_data *s = msm_host->supplies;
284 	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
285 	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
286 	int ret, i;
287 
288 	DBG("");
289 	for (i = 0; i < num; i++) {
290 		if (regs[i].enable_load >= 0) {
291 			ret = regulator_set_load(s[i].consumer,
292 						 regs[i].enable_load);
293 			if (ret < 0) {
294 				pr_err("regulator %d set op mode failed, %d\n",
295 					i, ret);
296 				goto fail;
297 			}
298 		}
299 	}
300 
301 	ret = regulator_bulk_enable(num, s);
302 	if (ret < 0) {
303 		pr_err("regulator enable failed, %d\n", ret);
304 		goto fail;
305 	}
306 
307 	return 0;
308 
309 fail:
310 	for (i--; i >= 0; i--)
311 		regulator_set_load(s[i].consumer, regs[i].disable_load);
312 	return ret;
313 }
314 
315 static int dsi_regulator_init(struct msm_dsi_host *msm_host)
316 {
317 	struct regulator_bulk_data *s = msm_host->supplies;
318 	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
319 	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
320 	int i, ret;
321 
322 	for (i = 0; i < num; i++)
323 		s[i].supply = regs[i].name;
324 
325 	ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
326 	if (ret < 0) {
327 		pr_err("%s: failed to init regulator, ret=%d\n",
328 						__func__, ret);
329 		return ret;
330 	}
331 
332 	return 0;
333 }
334 
/*
 * Acquire every clock this host may need (all via devm_clk_get(), so
 * nothing has to be released explicitly): the per-SoC bus clocks, the
 * byte/pixel/esc link clocks, and the parents of byte/pixel. DSI v2
 * hosts additionally need the src clock and the parents of esc/src.
 * Returns 0 on success or a negative errno from the first failing
 * lookup.
 */
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
	int i, ret = 0;

	/* get bus clocks */
	for (i = 0; i < cfg->num_bus_clks; i++) {
		msm_host->bus_clks[i] = devm_clk_get(dev,
						cfg->bus_clk_names[i]);
		if (IS_ERR(msm_host->bus_clks[i])) {
			ret = PTR_ERR(msm_host->bus_clks[i]);
			pr_err("%s: Unable to get %s, ret = %d\n",
				__func__, cfg->bus_clk_names[i], ret);
			goto exit;
		}
	}

	/* get link and source clocks */
	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
	if (IS_ERR(msm_host->byte_clk)) {
		ret = PTR_ERR(msm_host->byte_clk);
		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
			__func__, ret);
		msm_host->byte_clk = NULL;
		goto exit;
	}

	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
	if (IS_ERR(msm_host->pixel_clk)) {
		ret = PTR_ERR(msm_host->pixel_clk);
		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
			__func__, ret);
		msm_host->pixel_clk = NULL;
		goto exit;
	}

	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
	if (IS_ERR(msm_host->esc_clk)) {
		ret = PTR_ERR(msm_host->esc_clk);
		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
			__func__, ret);
		msm_host->esc_clk = NULL;
		goto exit;
	}

	/* cache the parents of the link clocks for later use */
	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
	if (!msm_host->byte_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
	if (!msm_host->pixel_clk_src) {
		ret = -ENODEV;
		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
		goto exit;
	}

	/* DSI v2 hosts need additional source clocks */
	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		msm_host->src_clk = devm_clk_get(dev, "src_clk");
		if (IS_ERR(msm_host->src_clk)) {
			ret = PTR_ERR(msm_host->src_clk);
			pr_err("%s: can't find dsi_src_clk. ret=%d\n",
				__func__, ret);
			msm_host->src_clk = NULL;
			goto exit;
		}

		msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
		if (!msm_host->esc_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get esc_clk_src. ret=%d\n",
				__func__, ret);
			goto exit;
		}

		msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
		if (!msm_host->dsi_clk_src) {
			ret = -ENODEV;
			pr_err("%s: can't get dsi_clk_src. ret=%d\n",
				__func__, ret);
		}
	}
exit:
	return ret;
}
424 
425 static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
426 {
427 	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
428 	int i, ret;
429 
430 	DBG("id=%d", msm_host->id);
431 
432 	for (i = 0; i < cfg->num_bus_clks; i++) {
433 		ret = clk_prepare_enable(msm_host->bus_clks[i]);
434 		if (ret) {
435 			pr_err("%s: failed to enable bus clock %d ret %d\n",
436 				__func__, i, ret);
437 			goto err;
438 		}
439 	}
440 
441 	return 0;
442 err:
443 	for (; i > 0; i--)
444 		clk_disable_unprepare(msm_host->bus_clks[i]);
445 
446 	return ret;
447 }
448 
449 static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
450 {
451 	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
452 	int i;
453 
454 	DBG("");
455 
456 	for (i = cfg->num_bus_clks - 1; i >= 0; i--)
457 		clk_disable_unprepare(msm_host->bus_clks[i]);
458 }
459 
460 int msm_dsi_runtime_suspend(struct device *dev)
461 {
462 	struct platform_device *pdev = to_platform_device(dev);
463 	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
464 	struct mipi_dsi_host *host = msm_dsi->host;
465 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
466 
467 	if (!msm_host->cfg_hnd)
468 		return 0;
469 
470 	dsi_bus_clk_disable(msm_host);
471 
472 	return 0;
473 }
474 
475 int msm_dsi_runtime_resume(struct device *dev)
476 {
477 	struct platform_device *pdev = to_platform_device(dev);
478 	struct msm_dsi *msm_dsi = platform_get_drvdata(pdev);
479 	struct mipi_dsi_host *host = msm_dsi->host;
480 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
481 
482 	if (!msm_host->cfg_hnd)
483 		return 0;
484 
485 	return dsi_bus_clk_enable(msm_host);
486 }
487 
/*
 * Set the byte/pixel clock rates (previously computed by
 * dsi_calc_clk_rate()) and enable the DSI6G link clocks in order:
 * esc, byte, pixel. On failure, clocks already enabled are disabled
 * again via the goto chain. Returns 0 or a negative errno.
 */
static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

	/* unwind in reverse enable order */
pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}
534 
/*
 * Set the byte/esc/src/pixel clock rates (previously computed by
 * dsi_calc_clk_rate()) and enable the DSI v2 link clocks in order:
 * byte, esc, src, pixel. On failure, clocks already enabled are
 * disabled again via the goto chain. Returns 0 or a negative errno.
 */
static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
	int ret;

	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
		msm_host->mode->clock, msm_host->byte_clk_rate,
		msm_host->esc_clk_rate, msm_host->src_clk_rate);

	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
	if (ret) {
		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
		goto error;
	}

	/* mode->clock is in kHz */
	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
	if (ret) {
		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto esc_clk_err;
	}

	ret = clk_prepare_enable(msm_host->src_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi src clk\n", __func__);
		goto src_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

	/* unwind in reverse enable order */
pixel_clk_err:
	clk_disable_unprepare(msm_host->src_clk);
src_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
esc_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
error:
	return ret;
}
602 
603 static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
604 {
605 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
606 
607 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
608 		return dsi_link_clk_enable_6g(msm_host);
609 	else
610 		return dsi_link_clk_enable_v2(msm_host);
611 }
612 
613 static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
614 {
615 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
616 
617 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
618 		clk_disable_unprepare(msm_host->esc_clk);
619 		clk_disable_unprepare(msm_host->pixel_clk);
620 		clk_disable_unprepare(msm_host->byte_clk);
621 	} else {
622 		clk_disable_unprepare(msm_host->pixel_clk);
623 		clk_disable_unprepare(msm_host->src_clk);
624 		clk_disable_unprepare(msm_host->esc_clk);
625 		clk_disable_unprepare(msm_host->byte_clk);
626 	}
627 }
628 
/*
 * Compute the link clock rates from the current display mode:
 * byte_clk = pclk * bpp / (8 * lanes). For 6G hosts the esc rate is
 * simply read back from the clock; for v2 hosts the src rate is
 * pclk * bpp / 8 and the esc rate is derived by searching for a byte
 * clock divider yielding 5-20 MHz. Requires msm_host->mode to be set.
 * Returns 0 on success, -EINVAL if no mode is set or no workable esc
 * divider exists.
 */
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u8 lanes = msm_host->lanes;
	u32 bpp = dsi_get_bpp(msm_host->format);
	u32 pclk_rate;

	if (!mode) {
		pr_err("%s: mode not set\n", __func__);
		return -EINVAL;
	}

	/* mode->clock is in kHz */
	pclk_rate = mode->clock * 1000;
	/*
	 * NOTE(review): pclk_rate * bpp is 32-bit arithmetic; for very
	 * high pixel clocks this could overflow u32 — confirm the upper
	 * bound of supported modes.
	 */
	if (lanes > 0) {
		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
	} else {
		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
	}

	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);

	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		unsigned int esc_mhz, esc_div;
		unsigned long byte_mhz;

		msm_host->src_clk_rate = (pclk_rate * bpp) / 8;

		/*
		 * esc clock is byte clock followed by a 4 bit divider,
		 * we need to find an escape clock frequency within the
		 * mipi DSI spec range within the maximum divider limit
		 * We iterate here between an escape clock frequencey
		 * between 20 Mhz to 5 Mhz and pick up the first one
		 * that can be supported by our divider
		 */

		byte_mhz = msm_host->byte_clk_rate / 1000000;

		for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
			esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);

			/*
			 * TODO: Ideally, we shouldn't know what sort of divider
			 * is available in mmss_cc, we're just assuming that
			 * it'll always be a 4 bit divider. Need to come up with
			 * a better way here.
			 */
			if (esc_div >= 1 && esc_div <= 16)
				break;
		}

		/* loop fell through without finding a usable divider */
		if (esc_mhz < 5)
			return -EINVAL;

		msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;

		DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
			msm_host->src_clk_rate);
	}

	return 0;
}
695 
696 static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
697 {
698 	u32 intr;
699 	unsigned long flags;
700 
701 	spin_lock_irqsave(&msm_host->intr_lock, flags);
702 	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
703 
704 	if (enable)
705 		intr |= mask;
706 	else
707 		intr &= ~mask;
708 
709 	DBG("intr=%x enable=%d", intr, enable);
710 
711 	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
712 	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
713 }
714 
715 static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
716 {
717 	if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
718 		return BURST_MODE;
719 	else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
720 		return NON_BURST_SYNCH_PULSE;
721 
722 	return NON_BURST_SYNCH_EVENT;
723 }
724 
725 static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
726 				const enum mipi_dsi_pixel_format mipi_fmt)
727 {
728 	switch (mipi_fmt) {
729 	case MIPI_DSI_FMT_RGB888:	return VID_DST_FORMAT_RGB888;
730 	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666_LOOSE;
731 	case MIPI_DSI_FMT_RGB666_PACKED:	return VID_DST_FORMAT_RGB666;
732 	case MIPI_DSI_FMT_RGB565:	return VID_DST_FORMAT_RGB565;
733 	default:			return VID_DST_FORMAT_RGB888;
734 	}
735 }
736 
/* Map a MIPI pixel format to the command-mode destination format;
 * anything unrecognized falls back to RGB888.
 */
static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
				const enum mipi_dsi_pixel_format mipi_fmt)
{
	switch (mipi_fmt) {
	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
	case MIPI_DSI_FMT_RGB666_PACKED:
	/*
	 * NOTE(review): a VID_* constant is returned from a CMD_*-typed
	 * function; the two enums may not share values — confirm against
	 * dsi.xml.h whether CMD_DST_FORMAT_RGB666 was intended here.
	 */
	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666;
	case MIPI_DSI_FMT_RGB565:	return CMD_DST_FORMAT_RGB565;
	default:			return CMD_DST_FORMAT_RGB888;
	}
}
748 
/*
 * Program the controller for the current mode: video- or command-engine
 * configuration, command DMA settings, trigger setup, clock-lane
 * timings from the PHY, EOT packet policy, error interrupt unmasking,
 * lane enable/mapping — and finally set DSI_CTRL_ENABLE. When @enable
 * is false, just clear DSI_CTRL.
 */
static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
			struct msm_dsi_phy_shared_timings *phy_shared_timings)
{
	u32 flags = msm_host->mode_flags;
	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	u32 data = 0;

	if (!enable) {
		dsi_write(msm_host, REG_DSI_CTRL, 0);
		return;
	}

	if (flags & MIPI_DSI_MODE_VIDEO) {
		/* translate the per-region low-power flags into VID_CFG0 */
		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
		if (flags & MIPI_DSI_MODE_VIDEO_HFP)
			data |= DSI_VID_CFG0_HFP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_HBP)
			data |= DSI_VID_CFG0_HBP_POWER_STOP;
		if (flags & MIPI_DSI_MODE_VIDEO_HSA)
			data |= DSI_VID_CFG0_HSA_POWER_STOP;
		/* Always set low power stop mode for BLLP
		 * to let command engine send packets
		 */
		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
			DSI_VID_CFG0_BLLP_POWER_STOP;
		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
		dsi_write(msm_host, REG_DSI_VID_CFG0, data);

		/* Do not swap RGB colors */
		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
		/*
		 * NOTE(review): 0 is written here rather than 'data';
		 * presumably SWAP_RGB is 0 so the values coincide —
		 * confirm against dsi.xml.h.
		 */
		dsi_write(msm_host, REG_DSI_VID_CFG1, 0);
	} else {
		/* Do not swap RGB colors */
		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);

		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
			DSI_CMD_CFG1_WR_MEM_CONTINUE(
					MIPI_DCS_WRITE_MEMORY_CONTINUE);
		/* Always insert DCS command */
		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
	}

	/* command DMA reads from memory, in low-power mode */
	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
			DSI_CMD_DMA_CTRL_LOW_POWER);

	data = 0;
	/* Always assume dedicated TE pin */
	data |= DSI_TRIG_CTRL_TE;
	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
	/* 6G v1.2+ can block command DMA inside the active frame */
	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);

	/* clock lane timings supplied by the PHY */
	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) |
		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre);
	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);

	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
	    (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) &&
	    phy_shared_timings->clk_pre_inc_by_2)
		dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND,
			  DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK);

	/* append EOT packets unless the peripheral opted out */
	data = 0;
	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);

	/* allow only ack-err-status to generate interrupt */
	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);

	data = DSI_CTRL_CLK_EN;

	DBG("lane number=%d", msm_host->lanes);
	/* enable data lanes 0..lanes-1 */
	data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);

	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));

	/* keep the clock lane in HS for continuous-clock peripherals */
	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
		dsi_write(msm_host, REG_DSI_LANE_CTRL,
			DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);

	data |= DSI_CTRL_ENABLE;

	dsi_write(msm_host, REG_DSI_CTRL, data);
}
851 
/*
 * Program the timing registers from msm_host->mode. For video mode,
 * the horizontal/vertical timeline is laid out with sync start taken
 * as time 0, so active-area offsets are computed relative to the sync
 * start. For command mode, only the MDP stream word count and totals
 * are needed.
 */
static void dsi_timing_setup(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}
902 
/*
 * Soft-reset the controller: force the internal clocks on first so the
 * reset can propagate, with wmb() ordering each step before the next.
 */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}
912 
913 static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
914 					bool video_mode, bool enable)
915 {
916 	u32 dsi_ctrl;
917 
918 	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
919 
920 	if (!enable) {
921 		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
922 				DSI_CTRL_CMD_MODE_EN);
923 		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
924 					DSI_IRQ_MASK_VIDEO_DONE, 0);
925 	} else {
926 		if (video_mode) {
927 			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
928 		} else {		/* command mode */
929 			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
930 			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
931 		}
932 		dsi_ctrl |= DSI_CTRL_ENABLE;
933 	}
934 
935 	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
936 }
937 
938 static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
939 {
940 	u32 data;
941 
942 	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
943 
944 	if (mode == 0)
945 		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
946 	else
947 		data |= DSI_CMD_DMA_CTRL_LOW_POWER;
948 
949 	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
950 }
951 
/*
 * Arm the video-done interrupt and block until video_comp is completed
 * (by the ISR) or 70 ms elapse. Best-effort: the timeout return value
 * is ignored. The interrupt is masked again before returning.
 */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
963 
/*
 * In video mode with the host powered, wait for the end of the current
 * frame and then sleep past the BLLP region. No-op in command mode or
 * when the host is off.
 */
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
		return;

	if (msm_host->power_on) {
		dsi_wait4video_done(msm_host);
		/* sleep 2-4 ms to skip the BLLP */
		usleep_range(2000, 4000);
	}
}
975 
976 /* dsi_cmd */
977 static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
978 {
979 	struct drm_device *dev = msm_host->dev;
980 	struct msm_drm_private *priv = dev->dev_private;
981 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
982 	int ret;
983 	uint64_t iova;
984 
985 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
986 		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
987 		if (IS_ERR(msm_host->tx_gem_obj)) {
988 			ret = PTR_ERR(msm_host->tx_gem_obj);
989 			pr_err("%s: failed to allocate gem, %d\n",
990 				__func__, ret);
991 			msm_host->tx_gem_obj = NULL;
992 			return ret;
993 		}
994 
995 		ret = msm_gem_get_iova(msm_host->tx_gem_obj,
996 				priv->kms->aspace, &iova);
997 		mutex_unlock(&dev->struct_mutex);
998 		if (ret) {
999 			pr_err("%s: failed to get iova, %d\n", __func__, ret);
1000 			return ret;
1001 		}
1002 
1003 		if (iova & 0x07) {
1004 			pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
1005 			return -EINVAL;
1006 		}
1007 
1008 		msm_host->tx_size = msm_host->tx_gem_obj->size;
1009 	} else {
1010 		msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1011 					&msm_host->tx_buf_paddr, GFP_KERNEL);
1012 		if (!msm_host->tx_buf) {
1013 			ret = -ENOMEM;
1014 			pr_err("%s: failed to allocate tx buf, %d\n",
1015 				__func__, ret);
1016 			return ret;
1017 		}
1018 
1019 		msm_host->tx_size = size;
1020 	}
1021 
1022 	return 0;
1023 }
1024 
/*
 * Free the TX command buffer allocated by dsi_tx_buf_alloc(): a GEM
 * object for 6G hosts, a DMA-coherent buffer for v2 hosts. Safe to
 * call when neither was allocated.
 */
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
	struct drm_device *dev = msm_host->dev;

	if (msm_host->tx_gem_obj) {
		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
		mutex_lock(&dev->struct_mutex);
		msm_gem_free_object(msm_host->tx_gem_obj);
		msm_host->tx_gem_obj = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	if (msm_host->tx_buf)
		dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
			msm_host->tx_buf_paddr);
}
1041 
/*
 * Lay out one MIPI DSI packet in the TX buffer in the MSM-specific DMA
 * format: reordered header bytes, a flags byte, then the payload,
 * padded with 0xff up to a 4-byte boundary. Returns the padded length
 * on success or a negative errno.
 */
static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
			   const struct mipi_dsi_msg *msg)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct mipi_dsi_packet packet;
	int len;
	int ret;
	u8 *data;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret) {
		pr_err("%s: create packet failed, %d\n", __func__, ret);
		return ret;
	}
	/* round up to a 4-byte multiple */
	len = (packet.size + 3) & (~0x3);

	if (len > msm_host->tx_size) {
		pr_err("%s: packet size is too big\n", __func__);
		return -EINVAL;
	}

	/* 6G hosts write through a GEM vmap; v2 hosts use the coherent buf */
	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
			return ret;
		}
	} else {
		data = msm_host->tx_buf;
	}

	/* MSM specific command format in memory */
	data[0] = packet.header[1];
	data[1] = packet.header[2];
	data[2] = packet.header[0];
	data[3] = BIT(7); /* Last packet */
	if (mipi_dsi_packet_format_is_long(msg->type))
		data[3] |= BIT(6);	/* long packet */
	if (msg->rx_buf && msg->rx_len)
		data[3] |= BIT(5);	/* response expected */

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
		msm_gem_put_vaddr(msm_host->tx_gem_obj);

	return len;
}
1100 
1101 /*
1102  * dsi_short_read1_resp: 1 parameter
1103  */
1104 static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1105 {
1106 	u8 *data = msg->rx_buf;
1107 	if (data && (msg->rx_len >= 1)) {
1108 		*data = buf[1]; /* strip out dcs type */
1109 		return 1;
1110 	} else {
1111 		pr_err("%s: read data does not match with rx_buf len %zu\n",
1112 			__func__, msg->rx_len);
1113 		return -EINVAL;
1114 	}
1115 }
1116 
1117 /*
1118  * dsi_short_read2_resp: 2 parameter
1119  */
1120 static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1121 {
1122 	u8 *data = msg->rx_buf;
1123 	if (data && (msg->rx_len >= 2)) {
1124 		data[0] = buf[1]; /* strip out dcs type */
1125 		data[1] = buf[2];
1126 		return 2;
1127 	} else {
1128 		pr_err("%s: read data does not match with rx_buf len %zu\n",
1129 			__func__, msg->rx_len);
1130 		return -EINVAL;
1131 	}
1132 }
1133 
1134 static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1135 {
1136 	/* strip out 4 byte dcs header */
1137 	if (msg->rx_buf && msg->rx_len)
1138 		memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1139 
1140 	return msg->rx_len;
1141 }
1142 
/*
 * dsi_cmd_dma_tx() - kick off the command DMA and wait for completion.
 *
 * Resolves the DMA address of the TX buffer (GEM iova for DSI6G, physical
 * address of the coherent buffer otherwise), asks the manager to trigger
 * the transfer, and waits up to 200ms for the CMD_DMA_DONE interrupt to
 * complete dma_comp.
 *
 * Returns @len on success (or if no trigger was needed), -ETIMEDOUT if the
 * DMA-done interrupt never arrived, or a negative errno if the iova could
 * not be obtained.
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct drm_device *dev = msm_host->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int ret;
	uint64_t dma_base;
	bool triggered;

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
		ret = msm_gem_get_iova(msm_host->tx_gem_obj,
				priv->kms->aspace, &dma_base);
		if (ret) {
			pr_err("%s: failed to get iova: %d\n", __func__, ret);
			return ret;
		}
	} else {
		dma_base = msm_host->tx_buf_paddr;
	}

	/* re-arm before triggering so a fast completion is not lost */
	reinit_completion(&msm_host->dma_comp);

	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, dma_base, len);
	if (triggered) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;

	return ret;
}
1182 
/*
 * dsi_cmd_dma_rx() - copy read-back data out of the RDBK_DATA registers.
 *
 * The controller exposes a 16-byte read FIFO via four 32-bit RDBK_DATA
 * registers.  The registers are read highest-index first and byte-swapped
 * to host order into a temporary buffer; any bytes that repeat from a
 * previous pass (multi-read case) are skipped before appending to @buf.
 *
 * @buf:      position in msm_host->rx_buf to append to
 * @rx_byte:  number of bytes expected for this pass (incl. header/crc)
 * @pkt_size: max return packet size requested from the panel
 *
 * Returns the number of bytes copied into @buf.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];
	int repeated_bytes = 0;
	/* how far into rx_buf we already are (bytes from earlier passes) */
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* drain the FIFO, highest register first, converting to host order */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* append only the fresh (non-repeated) bytes */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}
1232 
1233 static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1234 				const struct mipi_dsi_msg *msg)
1235 {
1236 	int len, ret;
1237 	int bllp_len = msm_host->mode->hdisplay *
1238 			dsi_get_bpp(msm_host->format) / 8;
1239 
1240 	len = dsi_cmd_dma_add(msm_host, msg);
1241 	if (!len) {
1242 		pr_err("%s: failed to add cmd type = 0x%x\n",
1243 			__func__,  msg->type);
1244 		return -EINVAL;
1245 	}
1246 
1247 	/* for video mode, do not send cmds more than
1248 	* one pixel line, since it only transmit it
1249 	* during BLLP.
1250 	*/
1251 	/* TODO: if the command is sent in LP mode, the bit rate is only
1252 	 * half of esc clk rate. In this case, if the video is already
1253 	 * actively streaming, we need to check more carefully if the
1254 	 * command can be fit into one BLLP.
1255 	 */
1256 	if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1257 		pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1258 			__func__, len);
1259 		return -EINVAL;
1260 	}
1261 
1262 	ret = dsi_cmd_dma_tx(msm_host, len);
1263 	if (ret < len) {
1264 		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1265 			__func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1266 		return -ECOMM;
1267 	}
1268 
1269 	return len;
1270 }
1271 
/*
 * dsi_sw_reset_restore() - soft-reset the DSI controller and restore its
 * previous DSI_CTRL configuration.
 *
 * Used by the error worker to recover from an MDP FIFO underflow.  The
 * sequence (disable controller -> enable clocks -> pulse RESET -> restore
 * CTRL) is ordered with wmb() barriers; do not reorder these writes.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}
1297 
1298 static void dsi_hpd_worker(struct work_struct *work)
1299 {
1300 	struct msm_dsi_host *msm_host =
1301 		container_of(work, struct msm_dsi_host, hpd_work);
1302 
1303 	drm_helper_hpd_irq_event(msm_host->dev);
1304 }
1305 
/*
 * dsi_err_worker() - process-context half of DSI error handling.
 *
 * Runs with the error interrupt masked (dsi_error() disables it before
 * queueing this work), so err_work_state can be read and cleared without
 * racing the IRQ handler.  An MDP FIFO underflow requires a controller
 * soft reset; the error interrupt is re-enabled at the end.
 */
static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err_ratelimited("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset_restore(msm_host);

	/* It is safe to clear here because error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}
1322 
/*
 * dsi_ack_err_status() - latch and acknowledge peripheral ACK errors.
 *
 * Reads DSI_ACK_ERR_STATUS; if any bits are set, acknowledges them (the
 * hardware needs the status written back followed by an extra write of 0)
 * and records DSI_ERR_STATE_ACK for the error worker.
 */
static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
		/* Writing of an extra 0 needed to clear error bits */
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
	}
}
1336 
1337 static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1338 {
1339 	u32 status;
1340 
1341 	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1342 
1343 	if (status) {
1344 		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1345 		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1346 	}
1347 }
1348 
1349 static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1350 {
1351 	u32 status;
1352 
1353 	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1354 
1355 	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1356 			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1357 			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1358 			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1359 			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1360 		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1361 		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1362 	}
1363 }
1364 
1365 static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1366 {
1367 	u32 status;
1368 
1369 	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1370 
1371 	/* fifo underflow, overflow */
1372 	if (status) {
1373 		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1374 		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1375 		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1376 			msm_host->err_work_state |=
1377 					DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1378 	}
1379 }
1380 
1381 static void dsi_status(struct msm_dsi_host *msm_host)
1382 {
1383 	u32 status;
1384 
1385 	status = dsi_read(msm_host, REG_DSI_STATUS0);
1386 
1387 	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1388 		dsi_write(msm_host, REG_DSI_STATUS0, status);
1389 		msm_host->err_work_state |=
1390 			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1391 	}
1392 }
1393 
1394 static void dsi_clk_status(struct msm_dsi_host *msm_host)
1395 {
1396 	u32 status;
1397 
1398 	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1399 
1400 	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1401 		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1402 		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1403 	}
1404 }
1405 
/*
 * dsi_error() - IRQ-context error dispatcher.
 *
 * Called from dsi_host_irq() when DSI_IRQ_ERROR is set.  Masks further
 * error interrupts, gathers every latched error source into
 * err_work_state, and defers the actual recovery to dsi_err_worker()
 * (which re-enables the error interrupt when done).
 */
static void dsi_error(struct msm_dsi_host *msm_host)
{
	/* disable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);

	dsi_clk_status(msm_host);
	dsi_fifo_status(msm_host);
	dsi_ack_err_status(msm_host);
	dsi_timeout_status(msm_host);
	dsi_status(msm_host);
	dsi_dln0_phy_err(msm_host);

	queue_work(msm_host->workqueue, &msm_host->err_work);
}
1420 
/*
 * dsi_host_irq() - DSI controller interrupt handler.
 *
 * Reads and immediately acknowledges the interrupt status under
 * intr_lock, then dispatches: errors go to dsi_error(), video-done and
 * command-DMA-done complete their respective completions.  Bails out
 * early if the register base is not mapped yet.
 */
static irqreturn_t dsi_host_irq(int irq, void *ptr)
{
	struct msm_dsi_host *msm_host = ptr;
	u32 isr;
	unsigned long flags;

	if (!msm_host->ctrl_base)
		return IRQ_HANDLED;

	/* read-then-write-back acknowledges all pending bits atomically */
	spin_lock_irqsave(&msm_host->intr_lock, flags);
	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);

	DBG("isr=0x%x, id=%d", isr, msm_host->id);

	if (isr & DSI_IRQ_ERROR)
		dsi_error(msm_host);

	if (isr & DSI_IRQ_VIDEO_DONE)
		complete(&msm_host->video_comp);

	if (isr & DSI_IRQ_CMD_DMA_DONE)
		complete(&msm_host->dma_comp);

	return IRQ_HANDLED;
}
1448 
/*
 * dsi_host_init_panel_gpios() - claim optional panel GPIOs from the panel
 * device's DT node.
 *
 * Both GPIOs are optional: devm_gpiod_get_optional() returns NULL when
 * the property is absent (not an error), an ERR_PTR on a real failure.
 * On failure the ERR_PTR is left in the field and its errno returned;
 * callers treat any non-zero return as fatal for the attach.
 */
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
			struct device *panel_device)
{
	/* display enable line, driven low (panel off) initially */
	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
							 "disp-enable",
							 GPIOD_OUT_LOW);
	if (IS_ERR(msm_host->disp_en_gpio)) {
		DBG("cannot get disp-enable-gpios %ld",
				PTR_ERR(msm_host->disp_en_gpio));
		return PTR_ERR(msm_host->disp_en_gpio);
	}

	/* tearing-effect input from the panel */
	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
								GPIOD_IN);
	if (IS_ERR(msm_host->te_gpio)) {
		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
		return PTR_ERR(msm_host->te_gpio);
	}

	return 0;
}
1470 
/*
 * dsi_host_attach() - mipi_dsi_host_ops.attach callback.
 *
 * Records the peripheral's configuration (channel, lane count, pixel
 * format, mode flags) on the host, rejecting lane counts beyond what the
 * DT declared, notifies the DSI manager, claims panel GPIOs, and kicks a
 * hotplug event if the DRM device is already bound.
 */
static int dsi_host_attach(struct mipi_dsi_host *host,
					struct mipi_dsi_device *dsi)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	if (dsi->lanes > msm_host->num_data_lanes)
		return -EINVAL;

	msm_host->channel = dsi->channel;
	msm_host->lanes = dsi->lanes;
	msm_host->format = dsi->format;
	msm_host->mode_flags = dsi->mode_flags;

	msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags);

	/* Some gpios defined in panel DT need to be controlled by host */
	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
	if (ret)
		return ret;

	DBG("id=%d", msm_host->id);
	/* only signal hotplug once the DRM device has been bound */
	if (msm_host->dev)
		queue_work(msm_host->workqueue, &msm_host->hpd_work);

	return 0;
}
1498 
1499 static int dsi_host_detach(struct mipi_dsi_host *host,
1500 					struct mipi_dsi_device *dsi)
1501 {
1502 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1503 
1504 	msm_host->device_node = NULL;
1505 
1506 	DBG("id=%d", msm_host->id);
1507 	if (msm_host->dev)
1508 		queue_work(msm_host->workqueue, &msm_host->hpd_work);
1509 
1510 	return 0;
1511 }
1512 
1513 static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1514 					const struct mipi_dsi_msg *msg)
1515 {
1516 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1517 	int ret;
1518 
1519 	if (!msg || !msm_host->power_on)
1520 		return -EINVAL;
1521 
1522 	mutex_lock(&msm_host->cmd_mutex);
1523 	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1524 	mutex_unlock(&msm_host->cmd_mutex);
1525 
1526 	return ret;
1527 }
1528 
1529 static struct mipi_dsi_host_ops dsi_host_ops = {
1530 	.attach = dsi_host_attach,
1531 	.detach = dsi_host_detach,
1532 	.transfer = dsi_host_transfer,
1533 };
1534 
1535 /*
1536  * List of supported physical to logical lane mappings.
1537  * For example, the 2nd entry represents the following mapping:
1538  *
1539  * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
1540  */
1541 static const int supported_data_lane_swaps[][4] = {
1542 	{ 0, 1, 2, 3 },
1543 	{ 3, 0, 1, 2 },
1544 	{ 2, 3, 0, 1 },
1545 	{ 1, 2, 3, 0 },
1546 	{ 0, 3, 2, 1 },
1547 	{ 1, 0, 3, 2 },
1548 	{ 2, 1, 0, 3 },
1549 	{ 3, 2, 1, 0 },
1550 };
1551 
1552 static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
1553 				    struct device_node *ep)
1554 {
1555 	struct device *dev = &msm_host->pdev->dev;
1556 	struct property *prop;
1557 	u32 lane_map[4];
1558 	int ret, i, len, num_lanes;
1559 
1560 	prop = of_find_property(ep, "data-lanes", &len);
1561 	if (!prop) {
1562 		dev_dbg(dev,
1563 			"failed to find data lane mapping, using default\n");
1564 		return 0;
1565 	}
1566 
1567 	num_lanes = len / sizeof(u32);
1568 
1569 	if (num_lanes < 1 || num_lanes > 4) {
1570 		dev_err(dev, "bad number of data lanes\n");
1571 		return -EINVAL;
1572 	}
1573 
1574 	msm_host->num_data_lanes = num_lanes;
1575 
1576 	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
1577 					 num_lanes);
1578 	if (ret) {
1579 		dev_err(dev, "failed to read lane data\n");
1580 		return ret;
1581 	}
1582 
1583 	/*
1584 	 * compare DT specified physical-logical lane mappings with the ones
1585 	 * supported by hardware
1586 	 */
1587 	for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
1588 		const int *swap = supported_data_lane_swaps[i];
1589 		int j;
1590 
1591 		/*
1592 		 * the data-lanes array we get from DT has a logical->physical
1593 		 * mapping. The "data lane swap" register field represents
1594 		 * supported configurations in a physical->logical mapping.
1595 		 * Translate the DT mapping to what we understand and find a
1596 		 * configuration that works.
1597 		 */
1598 		for (j = 0; j < num_lanes; j++) {
1599 			if (lane_map[j] < 0 || lane_map[j] > 3)
1600 				dev_err(dev, "bad physical lane entry %u\n",
1601 					lane_map[j]);
1602 
1603 			if (swap[lane_map[j]] != j)
1604 				break;
1605 		}
1606 
1607 		if (j == num_lanes) {
1608 			msm_host->dlane_swap = i;
1609 			return 0;
1610 		}
1611 	}
1612 
1613 	return -EINVAL;
1614 }
1615 
/*
 * dsi_host_parse_dt() - parse the host's DT node.
 *
 * Resolves the output endpoint (port reg = 1), the data-lane mapping, the
 * connected panel/bridge node, and the optional "syscon-sfpb" regmap.
 * A missing endpoint or missing remote device is not an error - nothing
 * may be connected to the DSI output.
 */
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
	struct device *dev = &msm_host->pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *endpoint, *device_node;
	int ret = 0;

	/*
	 * Get the endpoint of the output port of the DSI host. In our case,
	 * this is mapped to port number with reg = 1. Don't return an error if
	 * the remote endpoint isn't defined. It's possible that there is
	 * nothing connected to the dsi output.
	 */
	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
	if (!endpoint) {
		dev_dbg(dev, "%s: no endpoint\n", __func__);
		return 0;
	}

	ret = dsi_host_parse_lane_data(msm_host, endpoint);
	if (ret) {
		dev_err(dev, "%s: invalid lane configuration %d\n",
			__func__, ret);
		goto err;
	}

	/* Get panel node from the output port's endpoint data */
	device_node = of_graph_get_remote_node(np, 1, 0);
	if (!device_node) {
		/* ret stays 0: no connected device is a valid configuration */
		dev_dbg(dev, "%s: no valid device\n", __func__);
		goto err;
	}

	msm_host->device_node = device_node;

	if (of_property_read_bool(np, "syscon-sfpb")) {
		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
					"syscon-sfpb");
		if (IS_ERR(msm_host->sfpb)) {
			dev_err(dev, "%s: failed to get sfpb regmap\n",
				__func__);
			ret = PTR_ERR(msm_host->sfpb);
		}
	}

	/*
	 * NOTE(review): device_node's reference is dropped here although the
	 * pointer is kept in msm_host->device_node - looks like it relies on
	 * the DT node outliving the driver; confirm the intended lifetime.
	 */
	of_node_put(device_node);

err:
	of_node_put(endpoint);

	return ret;
}
1668 
1669 static int dsi_host_get_id(struct msm_dsi_host *msm_host)
1670 {
1671 	struct platform_device *pdev = msm_host->pdev;
1672 	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
1673 	struct resource *res;
1674 	int i;
1675 
1676 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
1677 	if (!res)
1678 		return -EINVAL;
1679 
1680 	for (i = 0; i < cfg->num_dsi; i++) {
1681 		if (cfg->io_start[i] == res->start)
1682 			return i;
1683 	}
1684 
1685 	return -EINVAL;
1686 }
1687 
1688 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1689 {
1690 	struct msm_dsi_host *msm_host = NULL;
1691 	struct platform_device *pdev = msm_dsi->pdev;
1692 	int ret;
1693 
1694 	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1695 	if (!msm_host) {
1696 		pr_err("%s: FAILED: cannot alloc dsi host\n",
1697 		       __func__);
1698 		ret = -ENOMEM;
1699 		goto fail;
1700 	}
1701 
1702 	msm_host->pdev = pdev;
1703 	msm_dsi->host = &msm_host->base;
1704 
1705 	ret = dsi_host_parse_dt(msm_host);
1706 	if (ret) {
1707 		pr_err("%s: failed to parse dt\n", __func__);
1708 		goto fail;
1709 	}
1710 
1711 	msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1712 	if (IS_ERR(msm_host->ctrl_base)) {
1713 		pr_err("%s: unable to map Dsi ctrl base\n", __func__);
1714 		ret = PTR_ERR(msm_host->ctrl_base);
1715 		goto fail;
1716 	}
1717 
1718 	pm_runtime_enable(&pdev->dev);
1719 
1720 	msm_host->cfg_hnd = dsi_get_config(msm_host);
1721 	if (!msm_host->cfg_hnd) {
1722 		ret = -EINVAL;
1723 		pr_err("%s: get config failed\n", __func__);
1724 		goto fail;
1725 	}
1726 
1727 	msm_host->id = dsi_host_get_id(msm_host);
1728 	if (msm_host->id < 0) {
1729 		ret = msm_host->id;
1730 		pr_err("%s: unable to identify DSI host index\n", __func__);
1731 		goto fail;
1732 	}
1733 
1734 	/* fixup base address by io offset */
1735 	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1736 
1737 	ret = dsi_regulator_init(msm_host);
1738 	if (ret) {
1739 		pr_err("%s: regulator init failed\n", __func__);
1740 		goto fail;
1741 	}
1742 
1743 	ret = dsi_clk_init(msm_host);
1744 	if (ret) {
1745 		pr_err("%s: unable to initialize dsi clks\n", __func__);
1746 		goto fail;
1747 	}
1748 
1749 	msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1750 	if (!msm_host->rx_buf) {
1751 		ret = -ENOMEM;
1752 		pr_err("%s: alloc rx temp buf failed\n", __func__);
1753 		goto fail;
1754 	}
1755 
1756 	init_completion(&msm_host->dma_comp);
1757 	init_completion(&msm_host->video_comp);
1758 	mutex_init(&msm_host->dev_mutex);
1759 	mutex_init(&msm_host->cmd_mutex);
1760 	spin_lock_init(&msm_host->intr_lock);
1761 
1762 	/* setup workqueue */
1763 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1764 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
1765 	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1766 
1767 	msm_dsi->id = msm_host->id;
1768 
1769 	DBG("Dsi Host %d initialized", msm_host->id);
1770 	return 0;
1771 
1772 fail:
1773 	return ret;
1774 }
1775 
/*
 * msm_dsi_host_destroy() - tear down resources from msm_dsi_host_init().
 *
 * Frees the command TX buffer, flushes and destroys the error/hotplug
 * workqueue, destroys the mutexes, and disables runtime PM.  devm-managed
 * allocations (registers, rx_buf, msm_host itself) are released by the
 * driver core.
 */
void msm_dsi_host_destroy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_tx_buf_free(msm_host);
	if (msm_host->workqueue) {
		/* drain pending err/hpd work before destroying the queue */
		flush_workqueue(msm_host->workqueue);
		destroy_workqueue(msm_host->workqueue);
		msm_host->workqueue = NULL;
	}

	mutex_destroy(&msm_host->cmd_mutex);
	mutex_destroy(&msm_host->dev_mutex);

	pm_runtime_disable(&msm_host->pdev->dev);
}
1793 
1794 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1795 					struct drm_device *dev)
1796 {
1797 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1798 	struct platform_device *pdev = msm_host->pdev;
1799 	int ret;
1800 
1801 	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1802 	if (msm_host->irq < 0) {
1803 		ret = msm_host->irq;
1804 		dev_err(dev->dev, "failed to get irq: %d\n", ret);
1805 		return ret;
1806 	}
1807 
1808 	ret = devm_request_irq(&pdev->dev, msm_host->irq,
1809 			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1810 			"dsi_isr", msm_host);
1811 	if (ret < 0) {
1812 		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1813 				msm_host->irq, ret);
1814 		return ret;
1815 	}
1816 
1817 	msm_host->dev = dev;
1818 	ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1819 	if (ret) {
1820 		pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1821 		return ret;
1822 	}
1823 
1824 	return 0;
1825 }
1826 
/*
 * msm_dsi_host_register() - register the host with the MIPI DSI core.
 *
 * Idempotent: does nothing once registered.  When @check_defer is set and
 * a device node is connected to the DSI output, returns -EPROBE_DEFER
 * until its panel or bridge driver has probed, so the connector has a
 * valid display mode by the time fbcon looks at it.
 */
int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret;

	/* Register mipi dsi host */
	if (!msm_host->registered) {
		host->dev = &msm_host->pdev->dev;
		host->ops = &dsi_host_ops;
		ret = mipi_dsi_host_register(host);
		if (ret)
			return ret;

		msm_host->registered = true;

		/* If the panel driver has not been probed after host register,
		 * we should defer the host's probe.
		 * It makes sure panel is connected when fbcon detects
		 * connector status and gets the proper display mode to
		 * create framebuffer.
		 * Don't try to defer if there is nothing connected to the dsi
		 * output
		 */
		if (check_defer && msm_host->device_node) {
			if (!of_drm_find_panel(msm_host->device_node))
				if (!of_drm_find_bridge(msm_host->device_node))
					return -EPROBE_DEFER;
		}
	}

	return 0;
}
1859 
1860 void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1861 {
1862 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1863 
1864 	if (msm_host->registered) {
1865 		mipi_dsi_host_unregister(host);
1866 		host->dev = NULL;
1867 		host->ops = NULL;
1868 		msm_host->registered = false;
1869 	}
1870 }
1871 
/*
 * msm_dsi_host_xfer_prepare() - put the host into command-transfer state.
 *
 * Grabs runtime PM and the link clocks, switches to HS mode unless the
 * message requests LP, saves DSI_CTRL so msm_dsi_host_xfer_restore() can
 * put it back, force-enables command mode, and unmasks the CMD_DMA_DONE
 * interrupt used by dsi_cmd_dma_tx().
 */
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	/* TODO: make sure dsi_cmd_mdp is idle.
	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
	 * How to handle the old versions? Wait for mdp cmd done?
	 */

	/*
	 * mdss interrupt is generated in mdp core clock domain
	 * mdp clock need to be enabled to receive dsi interrupt
	 */
	pm_runtime_get_sync(&msm_host->pdev->dev);
	dsi_link_clk_enable(msm_host);

	/* TODO: vote for bus bandwidth */

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(0, msm_host);

	/* save current CTRL so xfer_restore() can undo the forced cmd mode */
	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
	dsi_write(msm_host, REG_DSI_CTRL,
		msm_host->dma_cmd_ctrl_restore |
		DSI_CTRL_CMD_MODE_EN |
		DSI_CTRL_ENABLE);
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);

	return 0;
}
1904 
/*
 * msm_dsi_host_xfer_restore() - undo msm_dsi_host_xfer_prepare().
 *
 * Masks the CMD_DMA_DONE interrupt, restores the saved DSI_CTRL value,
 * returns to LP power mode if the transfer used HS, and releases the
 * link clocks and runtime-PM reference.
 */
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);

	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
		dsi_set_tx_power_mode(1, msm_host);

	/* TODO: unvote for bus bandwidth */

	dsi_link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
1921 
/*
 * msm_dsi_host_cmd_tx() - transmit one DSI command message.
 *
 * Thin wrapper handing the message to the buffered DMA TX path; returns
 * the number of bytes sent or a negative errno.
 */
int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	return dsi_cmds2buf_tx(to_msm_dsi_host(host), msg);
}
1929 
/*
 * msm_dsi_host_cmd_rx() - perform a DSI read transaction.
 *
 * Because the controller's read-back FIFO only holds 16 bytes, longer
 * reads are split into multiple passes: each pass sets the panel's max
 * return packet size, re-sends the read command, and drains the FIFO via
 * dsi_cmd_dma_rx(), skipping bytes repeated from the previous pass.  The
 * assembled response in rx_buf is then decoded by type (ack/error, short
 * read, long read).
 *
 * Returns the number of payload bytes delivered to msg->rx_buf, 0 for an
 * empty/unknown response, or a negative errno on transfer failure.
 */
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	int data_byte, rx_byte, dlen, end;
	int short_response, diff, pkt_size, ret = 0;
	char cmd;
	int rlen = msg->rx_len;
	u8 *buf;

	/* <= 2 bytes fit in a short response; anything more is a long read */
	if (rlen <= 2) {
		short_response = 1;
		pkt_size = rlen;
		rx_byte = 4;
	} else {
		short_response = 0;
		data_byte = 10;	/* first read */
		if (rlen < data_byte)
			pkt_size = rlen;
		else
			pkt_size = data_byte;
		rx_byte = data_byte + 6; /* 4 header + 2 crc */
	}

	buf = msm_host->rx_buf;
	end = 0;
	while (!end) {
		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
		struct mipi_dsi_msg max_pkt_size_msg = {
			.channel = msg->channel,
			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
			.tx_len = 2,
			.tx_buf = tx,
		};

		DBG("rlen=%d pkt_size=%d rx_byte=%d",
			rlen, pkt_size, rx_byte);

		/* tell the panel how much it may return in this pass */
		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
		if (ret < 2) {
			pr_err("%s: Set max pkt size failed, %d\n",
				__func__, ret);
			return -EINVAL;
		}

		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
			/* Clear the RDBK_DATA registers */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
					DSI_RDBK_DATA_CTRL_CLR);
			wmb(); /* make sure the RDBK registers are cleared */
			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
			wmb(); /* release cleared status before transfer */
		}

		ret = dsi_cmds2buf_tx(msm_host, msg);
		if (ret < msg->tx_len) {
			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
			return ret;
		}

		/*
		 * once cmd_dma_done interrupt received,
		 * return data from client is ready and stored
		 * at RDBK_DATA register already
		 * since rx fifo is 16 bytes, dcs header is kept at first loop,
		 * after that dcs header lost during shift into registers
		 */
		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);

		if (dlen <= 0)
			return 0;

		if (short_response)
			break;

		if (rlen <= data_byte) {
			diff = data_byte - rlen;
			end = 1;
		} else {
			diff = 0;
			rlen -= data_byte;
		}

		if (!end) {
			dlen -= 2; /* 2 crc */
			dlen -= diff;
			buf += dlen;	/* next start position */
			data_byte = 14;	/* NOT first read */
			if (rlen < data_byte)
				pkt_size += rlen;
			else
				pkt_size += data_byte;
			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
		}
	}

	/*
	 * For single Long read, if the requested rlen < 10,
	 * we need to shift the start position of rx
	 * data buffer to skip the bytes which are not
	 * updated.
	 */
	if (pkt_size < 10 && !short_response)
		buf = msm_host->rx_buf + (10 - rlen);
	else
		buf = msm_host->rx_buf;

	/* first byte of the response identifies its type */
	cmd = buf[0];
	switch (cmd) {
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
		ret = 0;
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		ret = dsi_short_read1_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		ret = dsi_short_read2_resp(buf, msg);
		break;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		ret = dsi_long_read_resp(buf, msg);
		break;
	default:
		pr_warn("%s:Invalid response cmd\n", __func__);
		ret = 0;
	}

	return ret;
}
2064 
/*
 * msm_dsi_host_cmd_xfer_commit() - program and trigger a command DMA.
 *
 * Writes the DMA base address and length, then the trigger register; the
 * order matters, and the final wmb() ensures the trigger reaches the
 * hardware before the caller starts waiting for CMD_DMA_DONE.
 */
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
				  u32 len)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);

	/* Make sure trigger happens */
	wmb();
}
2077 
/*
 * msm_dsi_host_set_src_pll() - reparent the host clocks onto a DSI PLL.
 *
 * Points byte_clk_src and pixel_clk_src at the PLL's byte/pixel providers;
 * DSIv2 hosts additionally reparent dsi_clk_src and esc_clk_src.  If the
 * PLL cannot supply providers this is treated as "leave parents alone"
 * and succeeds.
 */
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
	struct msm_dsi_pll *src_pll)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
	struct clk *byte_clk_provider, *pixel_clk_provider;
	int ret;

	ret = msm_dsi_pll_get_clk_provider(src_pll,
				&byte_clk_provider, &pixel_clk_provider);
	if (ret) {
		/* not fatal: keep the current clock parents */
		pr_info("%s: can't get provider from pll, don't set parent\n",
			__func__);
		return 0;
	}

	ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
	if (ret) {
		pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
	if (ret) {
		pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
			__func__, ret);
		goto exit;
	}

	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
		ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
		if (ret) {
			pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
				__func__, ret);
			goto exit;
		}

		ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
		if (ret) {
			pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
				__func__, ret);
			goto exit;
		}
	}

exit:
	return ret;
}
2127 
/*
 * msm_dsi_host_reset_phy() - pulse the DSI PHY reset line.
 * @host: DSI host
 *
 * Asserts PHY reset, holds it for 1 ms, then deasserts and waits 100 us
 * before the PHY is used again.  The delay values are kept as-is from the
 * original sequence; presumably they satisfy a hardware hold-time
 * requirement - TODO confirm against the PHY documentation.
 */
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}
2140 
2141 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2142 	struct msm_dsi_phy_clk_request *clk_req)
2143 {
2144 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2145 	int ret;
2146 
2147 	ret = dsi_calc_clk_rate(msm_host);
2148 	if (ret) {
2149 		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2150 		return;
2151 	}
2152 
2153 	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2154 	clk_req->escclk_rate = msm_host->esc_clk_rate;
2155 }
2156 
2157 int msm_dsi_host_enable(struct mipi_dsi_host *host)
2158 {
2159 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2160 
2161 	dsi_op_mode_config(msm_host,
2162 		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
2163 
2164 	/* TODO: clock should be turned off for command mode,
2165 	 * and only turned on before MDP START.
2166 	 * This part of code should be enabled once mdp driver support it.
2167 	 */
2168 	/* if (msm_panel->mode == MSM_DSI_CMD_MODE) {
2169 	 *	dsi_link_clk_disable(msm_host);
2170 	 *	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
2171 	 * }
2172 	 */
2173 
2174 	return 0;
2175 }
2176 
2177 int msm_dsi_host_disable(struct mipi_dsi_host *host)
2178 {
2179 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2180 
2181 	dsi_op_mode_config(msm_host,
2182 		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2183 
2184 	/* Since we have disabled INTF, the video engine won't stop so that
2185 	 * the cmd engine will be blocked.
2186 	 * Reset to disable video engine so that we can send off cmd.
2187 	 */
2188 	dsi_sw_reset(msm_host);
2189 
2190 	return 0;
2191 }
2192 
2193 static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2194 {
2195 	enum sfpb_ahb_arb_master_port_en en;
2196 
2197 	if (!msm_host->sfpb)
2198 		return;
2199 
2200 	en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2201 
2202 	regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2203 			SFPB_GPREG_MASTER_PORT_EN__MASK,
2204 			SFPB_GPREG_MASTER_PORT_EN(en));
2205 }
2206 
/*
 * msm_dsi_host_power_on() - full power-up sequence for the DSI host.
 * @host: DSI host
 * @phy_shared_timings: PHY timing values used when configuring the
 *	controller
 *
 * Sequence: SFPB master port -> regulators -> runtime PM + link clocks ->
 * pinctrl default state -> timing setup, sw reset, controller config ->
 * display-enable GPIO.  Failures unwind in reverse order via the goto
 * cleanup labels.  Idempotent: returns 0 immediately if already powered.
 *
 * Return: 0 on success or if already on, negative errno on failure.
 */
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
			struct msm_dsi_phy_shared_timings *phy_shared_timings)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
	int ret = 0;

	mutex_lock(&msm_host->dev_mutex);
	if (msm_host->power_on) {
		DBG("dsi host already on");
		goto unlock_ret;	/* ret is 0: power-on is idempotent */
	}

	msm_dsi_sfpb_config(msm_host, true);

	ret = dsi_host_regulator_enable(msm_host);
	if (ret) {
		pr_err("%s:Failed to enable vregs.ret=%d\n",
			__func__, ret);
		goto unlock_ret;
	}

	pm_runtime_get_sync(&msm_host->pdev->dev);
	ret = dsi_link_clk_enable(msm_host);
	if (ret) {
		pr_err("%s: failed to enable link clocks. ret=%d\n",
		       __func__, ret);
		goto fail_disable_reg;
	}

	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
	if (ret) {
		pr_err("%s: failed to set pinctrl default state, %d\n",
			__func__, ret);
		goto fail_disable_clk;
	}

	/* Controller setup requires the link clocks enabled above. */
	dsi_timing_setup(msm_host);
	dsi_sw_reset(msm_host);
	dsi_ctrl_config(msm_host, true, phy_shared_timings);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 1);

	msm_host->power_on = true;
	mutex_unlock(&msm_host->dev_mutex);

	return 0;

fail_disable_clk:
	dsi_link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
	dsi_host_regulator_disable(msm_host);
unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return ret;
}
2264 
/*
 * msm_dsi_host_power_off() - power-down sequence for the DSI host.
 * @host: DSI host
 *
 * Tears down in the reverse order of msm_dsi_host_power_on(): controller
 * config -> display-enable GPIO -> pinctrl sleep state -> link clocks +
 * runtime PM -> regulators -> SFPB master port.  Idempotent: does nothing
 * if the host is already off.
 *
 * Return: always 0.
 */
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);

	mutex_lock(&msm_host->dev_mutex);
	if (!msm_host->power_on) {
		DBG("dsi host already off");
		goto unlock_ret;
	}

	dsi_ctrl_config(msm_host, false, NULL);

	if (msm_host->disp_en_gpio)
		gpiod_set_value(msm_host->disp_en_gpio, 0);

	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);

	dsi_link_clk_disable(msm_host);
	pm_runtime_put_autosuspend(&msm_host->pdev->dev);

	dsi_host_regulator_disable(msm_host);

	msm_dsi_sfpb_config(msm_host, false);

	DBG("-");

	msm_host->power_on = false;

unlock_ret:
	mutex_unlock(&msm_host->dev_mutex);
	return 0;
}
2297 
2298 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2299 					struct drm_display_mode *mode)
2300 {
2301 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2302 
2303 	if (msm_host->mode) {
2304 		drm_mode_destroy(msm_host->dev, msm_host->mode);
2305 		msm_host->mode = NULL;
2306 	}
2307 
2308 	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2309 	if (!msm_host->mode) {
2310 		pr_err("%s: cannot duplicate mode\n", __func__);
2311 		return -ENOMEM;
2312 	}
2313 
2314 	return 0;
2315 }
2316 
2317 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
2318 				unsigned long *panel_flags)
2319 {
2320 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2321 	struct drm_panel *panel;
2322 
2323 	panel = of_drm_find_panel(msm_host->device_node);
2324 	if (panel_flags)
2325 			*panel_flags = msm_host->mode_flags;
2326 
2327 	return panel;
2328 }
2329 
2330 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2331 {
2332 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2333 
2334 	return of_drm_find_bridge(msm_host->device_node);
2335 }
2336