xref: /openbmc/linux/drivers/gpu/drm/msm/dsi/dsi_host.c (revision 82003e04)
1 /*
2  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 and
6  * only version 2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/delay.h>
16 #include <linux/err.h>
17 #include <linux/gpio.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/interrupt.h>
20 #include <linux/of_device.h>
21 #include <linux/of_gpio.h>
22 #include <linux/of_irq.h>
23 #include <linux/pinctrl/consumer.h>
24 #include <linux/of_graph.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/spinlock.h>
27 #include <linux/mfd/syscon.h>
28 #include <linux/regmap.h>
29 #include <video/mipi_display.h>
30 
31 #include "dsi.h"
32 #include "dsi.xml.h"
33 #include "sfpb.xml.h"
34 #include "dsi_cfg.h"
35 
36 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
37 {
38 	u32 ver;
39 
40 	if (!major || !minor)
41 		return -EINVAL;
42 
43 	/*
44 	 * From DSI6G(v3) onwards, the addition of a 6G_HW_VERSION register
45 	 * at offset 0 shifts all other registers down by 4 bytes.
46 	 *
47 	 * To distinguish DSI6G(v3) and newer from DSIv2 and older, we read
48 	 * the DSI_VERSION register without any shift (offset 0x1f0). On
49 	 * DSIv2, this has to be a non-zero value. On DSI6G, this has to be
50 	 * zero (the offset points to a scratch register which we never
51 	 * touch).
52 	 */
53 
54 	ver = msm_readl(base + REG_DSI_VERSION);
55 	if (ver) {
56 		/* older dsi host, there is no register shift */
57 		ver = FIELD(ver, DSI_VERSION_MAJOR);
58 		if (ver <= MSM_DSI_VER_MAJOR_V2) {
59 			/* old versions */
60 			*major = ver;
61 			*minor = 0;
62 			return 0;
63 		} else {
64 			return -EINVAL;
65 		}
66 	} else {
67 		/*
68 		 * newer host, offset 0 has 6G_HW_VERSION, the rest of the
69 		 * registers are shifted down, read DSI_VERSION again with
70 		 * the shifted offset
71 		 */
72 		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
73 		ver = FIELD(ver, DSI_VERSION_MAJOR);
74 		if (ver == MSM_DSI_VER_MAJOR_6G) {
75 			/* 6G version */
76 			*major = ver;
77 			*minor = msm_readl(base + REG_DSI_6G_HW_VERSION);
78 			return 0;
79 		} else {
80 			return -EINVAL;
81 		}
82 	}
83 }
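
/*
 * Illustrative summary (editorial example, not part of the driver): the
 * detection above reduces to a small decision table, assuming the register
 * layout described in the comment:
 *
 *   readl(base + 0x1f0) != 0  ->  DSIv2 or older; major comes from
 *                                 DSI_VERSION_MAJOR, minor is forced to 0
 *   readl(base + 0x1f0) == 0  ->  DSI6G; DSI_VERSION is re-read at the
 *                                 shifted offset, and the minor version is
 *                                 taken from 6G_HW_VERSION at offset 0
 *
 * Anything else is rejected with -EINVAL.
 */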
84 
85 #define DSI_ERR_STATE_ACK			0x0000
86 #define DSI_ERR_STATE_TIMEOUT			0x0001
87 #define DSI_ERR_STATE_DLN0_PHY			0x0002
88 #define DSI_ERR_STATE_FIFO			0x0004
89 #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW	0x0008
90 #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION	0x0010
91 #define DSI_ERR_STATE_PLL_UNLOCKED		0x0020
92 
93 #define DSI_CLK_CTRL_ENABLE_CLKS	\
94 		(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
95 		DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
96 		DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
97 		DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)
98 
99 struct msm_dsi_host {
100 	struct mipi_dsi_host base;
101 
102 	struct platform_device *pdev;
103 	struct drm_device *dev;
104 
105 	int id;
106 
107 	void __iomem *ctrl_base;
108 	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
109 
110 	struct clk *bus_clks[DSI_BUS_CLK_MAX];
111 
112 	struct clk *byte_clk;
113 	struct clk *esc_clk;
114 	struct clk *pixel_clk;
115 	struct clk *byte_clk_src;
116 	struct clk *pixel_clk_src;
117 
118 	u32 byte_clk_rate;
119 	u32 esc_clk_rate;
120 
121 	/* DSI v2 specific clocks */
122 	struct clk *src_clk;
123 	struct clk *esc_clk_src;
124 	struct clk *dsi_clk_src;
125 
126 	u32 src_clk_rate;
127 
128 	struct gpio_desc *disp_en_gpio;
129 	struct gpio_desc *te_gpio;
130 
131 	const struct msm_dsi_cfg_handler *cfg_hnd;
132 
133 	struct completion dma_comp;
134 	struct completion video_comp;
135 	struct mutex dev_mutex;
136 	struct mutex cmd_mutex;
137 	struct mutex clk_mutex;
138 	spinlock_t intr_lock; /* Protect interrupt ctrl register */
139 
140 	u32 err_work_state;
141 	struct work_struct err_work;
142 	struct work_struct hpd_work;
143 	struct workqueue_struct *workqueue;
144 
145 	/* DSI 6G TX buffer */
146 	struct drm_gem_object *tx_gem_obj;
147 
148 	/* DSI v2 TX buffer */
149 	void *tx_buf;
150 	dma_addr_t tx_buf_paddr;
151 
152 	int tx_size;
153 
154 	u8 *rx_buf;
155 
156 	struct regmap *sfpb;
157 
158 	struct drm_display_mode *mode;
159 
160 	/* connected device info */
161 	struct device_node *device_node;
162 	unsigned int channel;
163 	unsigned int lanes;
164 	enum mipi_dsi_pixel_format format;
165 	unsigned long mode_flags;
166 
167 	/* lane data parsed via DT */
168 	int dlane_swap;
169 	int num_data_lanes;
170 
171 	u32 dma_cmd_ctrl_restore;
172 
173 	bool registered;
174 	bool power_on;
175 	int irq;
176 };
177 
178 static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
179 {
180 	switch (fmt) {
181 	case MIPI_DSI_FMT_RGB565:		return 16;
182 	case MIPI_DSI_FMT_RGB666_PACKED:	return 18;
183 	case MIPI_DSI_FMT_RGB666:
184 	case MIPI_DSI_FMT_RGB888:
185 	default:				return 24;
186 	}
187 }
188 
189 static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
190 {
191 	return msm_readl(msm_host->ctrl_base + reg);
192 }
193 static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
194 {
195 	msm_writel(data, msm_host->ctrl_base + reg);
196 }
197 
198 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
199 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);
200 
201 static const struct msm_dsi_cfg_handler *dsi_get_config(
202 						struct msm_dsi_host *msm_host)
203 {
204 	const struct msm_dsi_cfg_handler *cfg_hnd = NULL;
205 	struct device *dev = &msm_host->pdev->dev;
206 	struct regulator *gdsc_reg;
207 	struct clk *ahb_clk;
208 	int ret;
209 	u32 major = 0, minor = 0;
210 
211 	gdsc_reg = regulator_get(dev, "gdsc");
212 	if (IS_ERR(gdsc_reg)) {
213 		pr_err("%s: cannot get gdsc\n", __func__);
214 		goto exit;
215 	}
216 
217 	ahb_clk = clk_get(dev, "iface_clk");
218 	if (IS_ERR(ahb_clk)) {
219 		pr_err("%s: cannot get interface clock\n", __func__);
220 		goto put_gdsc;
221 	}
222 
223 	ret = regulator_enable(gdsc_reg);
224 	if (ret) {
225 		pr_err("%s: unable to enable gdsc\n", __func__);
226 		goto put_clk;
227 	}
228 
229 	ret = clk_prepare_enable(ahb_clk);
230 	if (ret) {
231 		pr_err("%s: unable to enable ahb_clk\n", __func__);
232 		goto disable_gdsc;
233 	}
234 
235 	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);
236 	if (ret) {
237 		pr_err("%s: Invalid version\n", __func__);
238 		goto disable_clks;
239 	}
240 
241 	cfg_hnd = msm_dsi_cfg_get(major, minor);
242 
243 	DBG("%s: Version %x:%x\n", __func__, major, minor);
244 
245 disable_clks:
246 	clk_disable_unprepare(ahb_clk);
247 disable_gdsc:
248 	regulator_disable(gdsc_reg);
249 put_clk:
250 	clk_put(ahb_clk);
251 put_gdsc:
252 	regulator_put(gdsc_reg);
253 exit:
254 	return cfg_hnd;
255 }
256 
257 static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
258 {
259 	return container_of(host, struct msm_dsi_host, base);
260 }
261 
262 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
263 {
264 	struct regulator_bulk_data *s = msm_host->supplies;
265 	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
266 	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
267 	int i;
268 
269 	DBG("");
270 	for (i = num - 1; i >= 0; i--)
271 		if (regs[i].disable_load >= 0)
272 			regulator_set_load(s[i].consumer,
273 					   regs[i].disable_load);
274 
275 	regulator_bulk_disable(num, s);
276 }
277 
278 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
279 {
280 	struct regulator_bulk_data *s = msm_host->supplies;
281 	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
282 	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
283 	int ret, i;
284 
285 	DBG("");
286 	for (i = 0; i < num; i++) {
287 		if (regs[i].enable_load >= 0) {
288 			ret = regulator_set_load(s[i].consumer,
289 						 regs[i].enable_load);
290 			if (ret < 0) {
291 				pr_err("regulator %d set op mode failed, %d\n",
292 					i, ret);
293 				goto fail;
294 			}
295 		}
296 	}
297 
298 	ret = regulator_bulk_enable(num, s);
299 	if (ret < 0) {
300 		pr_err("regulator enable failed, %d\n", ret);
301 		goto fail;
302 	}
303 
304 	return 0;
305 
306 fail:
307 	for (i--; i >= 0; i--)
308 		regulator_set_load(s[i].consumer, regs[i].disable_load);
309 	return ret;
310 }
311 
312 static int dsi_regulator_init(struct msm_dsi_host *msm_host)
313 {
314 	struct regulator_bulk_data *s = msm_host->supplies;
315 	const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs;
316 	int num = msm_host->cfg_hnd->cfg->reg_cfg.num;
317 	int i, ret;
318 
319 	for (i = 0; i < num; i++)
320 		s[i].supply = regs[i].name;
321 
322 	ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s);
323 	if (ret < 0) {
324 		pr_err("%s: failed to init regulator, ret=%d\n",
325 						__func__, ret);
326 		return ret;
327 	}
328 
329 	return 0;
330 }
331 
332 static int dsi_clk_init(struct msm_dsi_host *msm_host)
333 {
334 	struct device *dev = &msm_host->pdev->dev;
335 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
336 	const struct msm_dsi_config *cfg = cfg_hnd->cfg;
337 	int i, ret = 0;
338 
339 	/* get bus clocks */
340 	for (i = 0; i < cfg->num_bus_clks; i++) {
341 		msm_host->bus_clks[i] = devm_clk_get(dev,
342 						cfg->bus_clk_names[i]);
343 		if (IS_ERR(msm_host->bus_clks[i])) {
344 			ret = PTR_ERR(msm_host->bus_clks[i]);
345 			pr_err("%s: Unable to get %s, ret = %d\n",
346 				__func__, cfg->bus_clk_names[i], ret);
347 			goto exit;
348 		}
349 	}
350 
351 	/* get link and source clocks */
352 	msm_host->byte_clk = devm_clk_get(dev, "byte_clk");
353 	if (IS_ERR(msm_host->byte_clk)) {
354 		ret = PTR_ERR(msm_host->byte_clk);
355 		pr_err("%s: can't find dsi_byte_clk. ret=%d\n",
356 			__func__, ret);
357 		msm_host->byte_clk = NULL;
358 		goto exit;
359 	}
360 
361 	msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk");
362 	if (IS_ERR(msm_host->pixel_clk)) {
363 		ret = PTR_ERR(msm_host->pixel_clk);
364 		pr_err("%s: can't find dsi_pixel_clk. ret=%d\n",
365 			__func__, ret);
366 		msm_host->pixel_clk = NULL;
367 		goto exit;
368 	}
369 
370 	msm_host->esc_clk = devm_clk_get(dev, "core_clk");
371 	if (IS_ERR(msm_host->esc_clk)) {
372 		ret = PTR_ERR(msm_host->esc_clk);
373 		pr_err("%s: can't find dsi_esc_clk. ret=%d\n",
374 			__func__, ret);
375 		msm_host->esc_clk = NULL;
376 		goto exit;
377 	}
378 
379 	msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
380 	if (!msm_host->byte_clk_src) {
381 		ret = -ENODEV;
382 		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
383 		goto exit;
384 	}
385 
386 	msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk);
387 	if (!msm_host->pixel_clk_src) {
388 		ret = -ENODEV;
389 		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
390 		goto exit;
391 	}
392 
393 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
394 		msm_host->src_clk = devm_clk_get(dev, "src_clk");
395 		if (IS_ERR(msm_host->src_clk)) {
396 			ret = PTR_ERR(msm_host->src_clk);
397 			pr_err("%s: can't find dsi_src_clk. ret=%d\n",
398 				__func__, ret);
399 			msm_host->src_clk = NULL;
400 			goto exit;
401 		}
402 
403 		msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
404 		if (!msm_host->esc_clk_src) {
405 			ret = -ENODEV;
406 			pr_err("%s: can't get esc_clk_src. ret=%d\n",
407 				__func__, ret);
408 			goto exit;
409 		}
410 
411 		msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
412 		if (!msm_host->dsi_clk_src) {
413 			ret = -ENODEV;
414 			pr_err("%s: can't get dsi_clk_src. ret=%d\n",
415 				__func__, ret);
416 		}
417 	}
418 exit:
419 	return ret;
420 }
421 
422 static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
423 {
424 	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
425 	int i, ret;
426 
427 	DBG("id=%d", msm_host->id);
428 
429 	for (i = 0; i < cfg->num_bus_clks; i++) {
430 		ret = clk_prepare_enable(msm_host->bus_clks[i]);
431 		if (ret) {
432 			pr_err("%s: failed to enable bus clock %d ret %d\n",
433 				__func__, i, ret);
434 			goto err;
435 		}
436 	}
437 
438 	return 0;
439 err:
440 	while (--i >= 0)
441 		clk_disable_unprepare(msm_host->bus_clks[i]);
442 
443 	return ret;
444 }
445 
446 static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
447 {
448 	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
449 	int i;
450 
451 	DBG("");
452 
453 	for (i = cfg->num_bus_clks - 1; i >= 0; i--)
454 		clk_disable_unprepare(msm_host->bus_clks[i]);
455 }
456 
457 static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
458 {
459 	int ret;
460 
461 	DBG("Set clk rates: pclk=%d, byteclk=%d",
462 		msm_host->mode->clock, msm_host->byte_clk_rate);
463 
464 	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
465 	if (ret) {
466 		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
467 		goto error;
468 	}
469 
470 	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
471 	if (ret) {
472 		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
473 		goto error;
474 	}
475 
476 	ret = clk_prepare_enable(msm_host->esc_clk);
477 	if (ret) {
478 		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
479 		goto error;
480 	}
481 
482 	ret = clk_prepare_enable(msm_host->byte_clk);
483 	if (ret) {
484 		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
485 		goto byte_clk_err;
486 	}
487 
488 	ret = clk_prepare_enable(msm_host->pixel_clk);
489 	if (ret) {
490 		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
491 		goto pixel_clk_err;
492 	}
493 
494 	return 0;
495 
496 pixel_clk_err:
497 	clk_disable_unprepare(msm_host->byte_clk);
498 byte_clk_err:
499 	clk_disable_unprepare(msm_host->esc_clk);
500 error:
501 	return ret;
502 }
503 
504 static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
505 {
506 	int ret;
507 
508 	DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
509 		msm_host->mode->clock, msm_host->byte_clk_rate,
510 		msm_host->esc_clk_rate, msm_host->src_clk_rate);
511 
512 	ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
513 	if (ret) {
514 		pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret);
515 		goto error;
516 	}
517 
518 	ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate);
519 	if (ret) {
520 		pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret);
521 		goto error;
522 	}
523 
524 	ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate);
525 	if (ret) {
526 		pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret);
527 		goto error;
528 	}
529 
530 	ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
531 	if (ret) {
532 		pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
533 		goto error;
534 	}
535 
536 	ret = clk_prepare_enable(msm_host->byte_clk);
537 	if (ret) {
538 		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
539 		goto error;
540 	}
541 
542 	ret = clk_prepare_enable(msm_host->esc_clk);
543 	if (ret) {
544 		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
545 		goto esc_clk_err;
546 	}
547 
548 	ret = clk_prepare_enable(msm_host->src_clk);
549 	if (ret) {
550 		pr_err("%s: Failed to enable dsi src clk\n", __func__);
551 		goto src_clk_err;
552 	}
553 
554 	ret = clk_prepare_enable(msm_host->pixel_clk);
555 	if (ret) {
556 		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
557 		goto pixel_clk_err;
558 	}
559 
560 	return 0;
561 
562 pixel_clk_err:
563 	clk_disable_unprepare(msm_host->src_clk);
564 src_clk_err:
565 	clk_disable_unprepare(msm_host->esc_clk);
566 esc_clk_err:
567 	clk_disable_unprepare(msm_host->byte_clk);
568 error:
569 	return ret;
570 }
571 
572 static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
573 {
574 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
575 
576 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
577 		return dsi_link_clk_enable_6g(msm_host);
578 	else
579 		return dsi_link_clk_enable_v2(msm_host);
580 }
581 
582 static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
583 {
584 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
585 
586 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
587 		clk_disable_unprepare(msm_host->esc_clk);
588 		clk_disable_unprepare(msm_host->pixel_clk);
589 		clk_disable_unprepare(msm_host->byte_clk);
590 	} else {
591 		clk_disable_unprepare(msm_host->pixel_clk);
592 		clk_disable_unprepare(msm_host->src_clk);
593 		clk_disable_unprepare(msm_host->esc_clk);
594 		clk_disable_unprepare(msm_host->byte_clk);
595 	}
596 }
597 
598 static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
599 {
600 	int ret = 0;
601 
602 	mutex_lock(&msm_host->clk_mutex);
603 	if (enable) {
604 		ret = dsi_bus_clk_enable(msm_host);
605 		if (ret) {
606 			pr_err("%s: Can not enable bus clk, %d\n",
607 				__func__, ret);
608 			goto unlock_ret;
609 		}
610 		ret = dsi_link_clk_enable(msm_host);
611 		if (ret) {
612 			pr_err("%s: Can not enable link clk, %d\n",
613 				__func__, ret);
614 			dsi_bus_clk_disable(msm_host);
615 			goto unlock_ret;
616 		}
617 	} else {
618 		dsi_link_clk_disable(msm_host);
619 		dsi_bus_clk_disable(msm_host);
620 	}
621 
622 unlock_ret:
623 	mutex_unlock(&msm_host->clk_mutex);
624 	return ret;
625 }
626 
627 static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
628 {
629 	struct drm_display_mode *mode = msm_host->mode;
630 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
631 	u8 lanes = msm_host->lanes;
632 	u32 bpp = dsi_get_bpp(msm_host->format);
633 	u32 pclk_rate;
634 
635 	if (!mode) {
636 		pr_err("%s: mode not set\n", __func__);
637 		return -EINVAL;
638 	}
639 
640 	pclk_rate = mode->clock * 1000;
641 	if (lanes > 0) {
642 		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
643 	} else {
644 		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
645 		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
646 	}
647 
648 	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
649 
650 	msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
651 
652 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
653 		unsigned int esc_mhz, esc_div;
654 		unsigned long byte_mhz;
655 
656 		msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
657 
658 		/*
659 		 * The esc clock is the byte clock fed through a 4 bit
660 		 * divider. We need to find an escape clock frequency that is
661 		 * both within the MIPI DSI spec range and within the maximum
662 		 * divider limit. We iterate over candidate escape clock
663 		 * frequencies from 20 MHz down to 5 MHz and pick the first
664 		 * one our divider can produce.
665 		 */
666 
667 		byte_mhz = msm_host->byte_clk_rate / 1000000;
668 
669 		for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
670 			esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
671 
672 			/*
673 			 * TODO: Ideally, we shouldn't know what sort of divider
674 			 * is available in mmss_cc, we're just assuming that
675 			 * it'll always be a 4 bit divider. Need to come up with
676 			 * a better way here.
677 			 */
678 			if (esc_div >= 1 && esc_div <= 16)
679 				break;
680 		}
681 
682 		if (esc_mhz < 5)
683 			return -EINVAL;
684 
685 		msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
686 
687 		DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
688 			msm_host->src_clk_rate);
689 	}
690 
691 	return 0;
692 }
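
/*
 * Worked example for dsi_calc_clk_rate() (illustrative, with assumed mode
 * values): for a 1080p panel with pclk = 148500 kHz, RGB888 (24 bpp) and
 * 4 data lanes:
 *
 *   byte_clk_rate = (148500000 * 24) / (8 * 4) = 111375000 Hz
 *
 * On DSIv2, src_clk_rate = (148500000 * 24) / 8 = 445500000 Hz, and the
 * esc divider search starts at esc_mhz = 20: byte_mhz = 111, so
 * esc_div = DIV_ROUND_UP(111, 20) = 6, which fits the 4 bit divider,
 * giving esc_clk_rate = 111375000 / 6 = 18562500 Hz.
 */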
693 
694 static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
695 {
696 	DBG("");
697 	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
698 	/* Make sure fully reset */
699 	wmb();
700 	udelay(1000);
701 	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
702 	udelay(100);
703 }
704 
705 static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
706 {
707 	u32 intr;
708 	unsigned long flags;
709 
710 	spin_lock_irqsave(&msm_host->intr_lock, flags);
711 	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
712 
713 	if (enable)
714 		intr |= mask;
715 	else
716 		intr &= ~mask;
717 
718 	DBG("intr=%x enable=%d", intr, enable);
719 
720 	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
721 	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
722 }
723 
724 static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
725 {
726 	if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
727 		return BURST_MODE;
728 	else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
729 		return NON_BURST_SYNCH_PULSE;
730 
731 	return NON_BURST_SYNCH_EVENT;
732 }
733 
734 static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
735 				const enum mipi_dsi_pixel_format mipi_fmt)
736 {
737 	switch (mipi_fmt) {
738 	case MIPI_DSI_FMT_RGB888:	return VID_DST_FORMAT_RGB888;
739 	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666_LOOSE;
740 	case MIPI_DSI_FMT_RGB666_PACKED:	return VID_DST_FORMAT_RGB666;
741 	case MIPI_DSI_FMT_RGB565:	return VID_DST_FORMAT_RGB565;
742 	default:			return VID_DST_FORMAT_RGB888;
743 	}
744 }
745 
746 static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
747 				const enum mipi_dsi_pixel_format mipi_fmt)
748 {
749 	switch (mipi_fmt) {
750 	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
751 	case MIPI_DSI_FMT_RGB666_PACKED:
752 	case MIPI_DSI_FMT_RGB666:	return CMD_DST_FORMAT_RGB666;
753 	case MIPI_DSI_FMT_RGB565:	return CMD_DST_FORMAT_RGB565;
754 	default:			return CMD_DST_FORMAT_RGB888;
755 	}
756 }
757 
758 static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
759 				u32 clk_pre, u32 clk_post)
760 {
761 	u32 flags = msm_host->mode_flags;
762 	enum mipi_dsi_pixel_format mipi_fmt = msm_host->format;
763 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
764 	u32 data = 0;
765 
766 	if (!enable) {
767 		dsi_write(msm_host, REG_DSI_CTRL, 0);
768 		return;
769 	}
770 
771 	if (flags & MIPI_DSI_MODE_VIDEO) {
772 		if (flags & MIPI_DSI_MODE_VIDEO_HSE)
773 			data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE;
774 		if (flags & MIPI_DSI_MODE_VIDEO_HFP)
775 			data |= DSI_VID_CFG0_HFP_POWER_STOP;
776 		if (flags & MIPI_DSI_MODE_VIDEO_HBP)
777 			data |= DSI_VID_CFG0_HBP_POWER_STOP;
778 		if (flags & MIPI_DSI_MODE_VIDEO_HSA)
779 			data |= DSI_VID_CFG0_HSA_POWER_STOP;
780 		/* Always set low power stop mode for BLLP
781 		 * to let command engine send packets
782 		 */
783 		data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP |
784 			DSI_VID_CFG0_BLLP_POWER_STOP;
785 		data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
786 		data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
787 		data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
788 		dsi_write(msm_host, REG_DSI_VID_CFG0, data);
789 
790 		/* Do not swap RGB colors */
791 		data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB);
792 		dsi_write(msm_host, REG_DSI_VID_CFG1, data);
793 	} else {
794 		/* Do not swap RGB colors */
795 		data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB);
796 		data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt));
797 		dsi_write(msm_host, REG_DSI_CMD_CFG0, data);
798 
799 		data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) |
800 			DSI_CMD_CFG1_WR_MEM_CONTINUE(
801 					MIPI_DCS_WRITE_MEMORY_CONTINUE);
802 		/* Always insert DCS command */
803 		data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND;
804 		dsi_write(msm_host, REG_DSI_CMD_CFG1, data);
805 	}
806 
807 	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL,
808 			DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER |
809 			DSI_CMD_DMA_CTRL_LOW_POWER);
810 
811 	data = 0;
812 	/* Always assume dedicated TE pin */
813 	data |= DSI_TRIG_CTRL_TE;
814 	data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE);
815 	data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW);
816 	data |= DSI_TRIG_CTRL_STREAM(msm_host->channel);
817 	if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
818 		(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2))
819 		data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME;
820 	dsi_write(msm_host, REG_DSI_TRIG_CTRL, data);
821 
822 	data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) |
823 		DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre);
824 	dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data);
825 
826 	data = 0;
827 	if (!(flags & MIPI_DSI_MODE_EOT_PACKET))
828 		data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND;
829 	dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data);
830 
831 	/* allow only ack-err-status to generate interrupt */
832 	dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0);
833 
834 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
835 
836 	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
837 
838 	data = DSI_CTRL_CLK_EN;
839 
840 	DBG("lane number=%d", msm_host->lanes);
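	/*
	 * Illustrative note: (DSI_CTRL_LANE0 << lanes) - DSI_CTRL_LANE0
	 * equals DSI_CTRL_LANE0 * (2^lanes - 1), i.e. it sets 'lanes'
	 * consecutive bits starting at the LANE0 bit (assuming the LANEx
	 * bits are consecutive, which this expression relies on). With
	 * lanes == 3, the LANE0, LANE1 and LANE2 bits are set.
	 */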
841 	data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
842 
843 	dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
844 		  DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
845 
846 	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
847 		dsi_write(msm_host, REG_DSI_LANE_CTRL,
848 			DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
849 
850 	data |= DSI_CTRL_ENABLE;
851 
852 	dsi_write(msm_host, REG_DSI_CTRL, data);
853 }
854 
855 static void dsi_timing_setup(struct msm_dsi_host *msm_host)
856 {
857 	struct drm_display_mode *mode = msm_host->mode;
858 	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
859 	u32 h_total = mode->htotal;
860 	u32 v_total = mode->vtotal;
861 	u32 hs_end = mode->hsync_end - mode->hsync_start;
862 	u32 vs_end = mode->vsync_end - mode->vsync_start;
863 	u32 ha_start = h_total - mode->hsync_start;
864 	u32 ha_end = ha_start + mode->hdisplay;
865 	u32 va_start = v_total - mode->vsync_start;
866 	u32 va_end = va_start + mode->vdisplay;
867 	u32 wc;
868 
869 	DBG("");
870 
871 	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
872 		dsi_write(msm_host, REG_DSI_ACTIVE_H,
873 			DSI_ACTIVE_H_START(ha_start) |
874 			DSI_ACTIVE_H_END(ha_end));
875 		dsi_write(msm_host, REG_DSI_ACTIVE_V,
876 			DSI_ACTIVE_V_START(va_start) |
877 			DSI_ACTIVE_V_END(va_end));
878 		dsi_write(msm_host, REG_DSI_TOTAL,
879 			DSI_TOTAL_H_TOTAL(h_total - 1) |
880 			DSI_TOTAL_V_TOTAL(v_total - 1));
881 
882 		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
883 			DSI_ACTIVE_HSYNC_START(hs_start) |
884 			DSI_ACTIVE_HSYNC_END(hs_end));
885 		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
886 		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
887 			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
888 			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
889 	} else {		/* command mode */
890 		/* image data and 1 byte write_memory_start cmd */
891 		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
892 
893 		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
894 			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
895 			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
896 					msm_host->channel) |
897 			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
898 					MIPI_DSI_DCS_LONG_WRITE));
899 
900 		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
901 			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
902 			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
903 	}
904 }
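
/*
 * Worked example for the video mode timing math above (illustrative, with
 * assumed CEA 1080p60 values): hdisplay = 1920, hsync_start = 2008,
 * hsync_end = 2052, htotal = 2200 gives
 *
 *   hs_end   = 2052 - 2008 = 44   (HSA width, sync taken to start at 0)
 *   ha_start = 2200 - 2008 = 192  (HSA + HBP)
 *   ha_end   = 192 + 1920 = 2112
 *
 * i.e. the active region is programmed relative to the start of sync, and
 * the same construction applies vertically.
 */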
905 
906 static void dsi_sw_reset(struct msm_dsi_host *msm_host)
907 {
908 	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
909 	wmb(); /* clocks need to be enabled before reset */
910 
911 	dsi_write(msm_host, REG_DSI_RESET, 1);
912 	wmb(); /* make sure reset happen */
913 	dsi_write(msm_host, REG_DSI_RESET, 0);
914 }
915 
916 static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
917 					bool video_mode, bool enable)
918 {
919 	u32 dsi_ctrl;
920 
921 	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);
922 
923 	if (!enable) {
924 		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
925 				DSI_CTRL_CMD_MODE_EN);
926 		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
927 					DSI_IRQ_MASK_VIDEO_DONE, 0);
928 	} else {
929 		if (video_mode) {
930 			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
931 		} else {		/* command mode */
932 			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
933 			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
934 		}
935 		dsi_ctrl |= DSI_CTRL_ENABLE;
936 	}
937 
938 	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
939 }
940 
941 static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
942 {
943 	u32 data;
944 
945 	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);
946 
947 	if (mode == 0)
948 		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
949 	else
950 		data |= DSI_CMD_DMA_CTRL_LOW_POWER;
951 
952 	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
953 }
954 
955 static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
956 {
957 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
958 
959 	reinit_completion(&msm_host->video_comp);
960 
961 	wait_for_completion_timeout(&msm_host->video_comp,
962 			msecs_to_jiffies(70));
963 
964 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
965 }
966 
967 static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
968 {
969 	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
970 		return;
971 
972 	if (msm_host->power_on) {
973 		dsi_wait4video_done(msm_host);
974 		/* sleep 2-4 ms to skip the BLLP */
975 		usleep_range(2000, 4000);
976 	}
977 }
978 
979 /* dsi_cmd */
980 static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
981 {
982 	struct drm_device *dev = msm_host->dev;
983 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
984 	int ret;
985 	u32 iova;
986 
987 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
988 		mutex_lock(&dev->struct_mutex);
989 		msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
990 		if (IS_ERR(msm_host->tx_gem_obj)) {
991 			ret = PTR_ERR(msm_host->tx_gem_obj);
992 			pr_err("%s: failed to allocate gem, %d\n",
993 				__func__, ret);
994 			msm_host->tx_gem_obj = NULL;
995 			mutex_unlock(&dev->struct_mutex);
996 			return ret;
997 		}
998 
999 		ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
1000 		mutex_unlock(&dev->struct_mutex);
1001 		if (ret) {
1002 			pr_err("%s: failed to get iova, %d\n", __func__, ret);
1003 			return ret;
1004 		}
1005 
1006 		if (iova & 0x07) {
1007 			pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
1008 			return -EINVAL;
1009 		}
1010 
1011 		msm_host->tx_size = msm_host->tx_gem_obj->size;
1012 	} else {
1013 		msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
1014 					&msm_host->tx_buf_paddr, GFP_KERNEL);
1015 		if (!msm_host->tx_buf) {
1016 			ret = -ENOMEM;
1017 			pr_err("%s: failed to allocate tx buf, %d\n",
1018 				__func__, ret);
1019 			return ret;
1020 		}
1021 
1022 		msm_host->tx_size = size;
1023 	}
1024 
1025 	return 0;
1026 }
1027 
1028 static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
1029 {
1030 	struct drm_device *dev = msm_host->dev;
1031 
1032 	if (msm_host->tx_gem_obj) {
1033 		msm_gem_put_iova(msm_host->tx_gem_obj, 0);
1034 		mutex_lock(&dev->struct_mutex);
1035 		msm_gem_free_object(msm_host->tx_gem_obj);
1036 		msm_host->tx_gem_obj = NULL;
1037 		mutex_unlock(&dev->struct_mutex);
1038 	}
1039 
1040 	if (msm_host->tx_buf)
1041 		dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf,
1042 			msm_host->tx_buf_paddr);
1043 }
1044 
1045 /*
1046  * prepare the command buffer for transmission
1047  */
1048 static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
1049 			   const struct mipi_dsi_msg *msg)
1050 {
1051 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1052 	struct mipi_dsi_packet packet;
1053 	int len;
1054 	int ret;
1055 	u8 *data;
1056 
1057 	ret = mipi_dsi_create_packet(&packet, msg);
1058 	if (ret) {
1059 		pr_err("%s: create packet failed, %d\n", __func__, ret);
1060 		return ret;
1061 	}
1062 	len = (packet.size + 3) & (~0x3);
1063 
1064 	if (len > msm_host->tx_size) {
1065 		pr_err("%s: packet size is too big\n", __func__);
1066 		return -EINVAL;
1067 	}
1068 
1069 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
1070 		data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
1071 		if (IS_ERR(data)) {
1072 			ret = PTR_ERR(data);
1073 			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
1074 			return ret;
1075 		}
1076 	} else {
1077 		data = msm_host->tx_buf;
1078 	}
1079 
1080 	/* MSM specific command format in memory */
1081 	data[0] = packet.header[1];
1082 	data[1] = packet.header[2];
1083 	data[2] = packet.header[0];
1084 	data[3] = BIT(7); /* Last packet */
1085 	if (mipi_dsi_packet_format_is_long(msg->type))
1086 		data[3] |= BIT(6);
1087 	if (msg->rx_buf && msg->rx_len)
1088 		data[3] |= BIT(5);
1089 
1090 	/* Long packet */
1091 	if (packet.payload && packet.payload_length)
1092 		memcpy(data + 4, packet.payload, packet.payload_length);
1093 
1094 	/* Append 0xff to the end */
1095 	if (packet.size < len)
1096 		memset(data + packet.size, 0xff, len - packet.size);
1097 
1098 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
1099 		msm_gem_put_vaddr(msm_host->tx_gem_obj);
1100 
1101 	return len;
1102 }
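
/*
 * Illustrative example of the MSM command format built above (assumed
 * command, not from the driver): a DCS short write of 0x29
 * (set_display_on) on virtual channel 0 has the DSI packet header
 * { 0x05, 0x29, 0x00 }, so the buffer starts with
 *
 *   data[0] = 0x29  (header[1], first parameter)
 *   data[1] = 0x00  (header[2], second parameter)
 *   data[2] = 0x05  (header[0], data type | virtual channel)
 *   data[3] = 0x80  (BIT(7): last packet; short packet, no read)
 *
 * Any remainder up to the 4-byte aligned length is padded with 0xff
 * (none needed here, since a short packet is exactly 4 bytes).
 */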
1103 
1104 /*
1105  * dsi_short_read1_resp: 1 parameter byte
1106  */
1107 static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1108 {
1109 	u8 *data = msg->rx_buf;
1110 	if (data && (msg->rx_len >= 1)) {
1111 		*data = buf[1]; /* strip out dcs type */
1112 		return 1;
1113 	} else {
1114 		pr_err("%s: read data does not match with rx_buf len %zu\n",
1115 			__func__, msg->rx_len);
1116 		return -EINVAL;
1117 	}
1118 }
1119 
1120 /*
1121  * dsi_short_read2_resp: 2 parameter bytes
1122  */
1123 static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1124 {
1125 	u8 *data = msg->rx_buf;
1126 	if (data && (msg->rx_len >= 2)) {
1127 		data[0] = buf[1]; /* strip out dcs type */
1128 		data[1] = buf[2];
1129 		return 2;
1130 	} else {
1131 		pr_err("%s: read data does not match with rx_buf len %zu\n",
1132 			__func__, msg->rx_len);
1133 		return -EINVAL;
1134 	}
1135 }
1136 
1137 static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
1138 {
1139 	/* strip out 4 byte dcs header */
1140 	if (msg->rx_buf && msg->rx_len)
1141 		memcpy(msg->rx_buf, buf + 4, msg->rx_len);
1142 
1143 	return msg->rx_len;
1144 }
1145 
1146 static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
1147 {
1148 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1149 	int ret;
1150 	u32 dma_base;
1151 	bool triggered;
1152 
1153 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
1154 		ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &dma_base);
1155 		if (ret) {
1156 			pr_err("%s: failed to get iova: %d\n", __func__, ret);
1157 			return ret;
1158 		}
1159 	} else {
1160 		dma_base = msm_host->tx_buf_paddr;
1161 	}
1162 
1163 	reinit_completion(&msm_host->dma_comp);
1164 
1165 	dsi_wait4video_eng_busy(msm_host);
1166 
1167 	triggered = msm_dsi_manager_cmd_xfer_trigger(
1168 						msm_host->id, dma_base, len);
1169 	if (triggered) {
1170 		ret = wait_for_completion_timeout(&msm_host->dma_comp,
1171 					msecs_to_jiffies(200));
1172 		DBG("ret=%d", ret);
1173 		if (ret == 0)
1174 			ret = -ETIMEDOUT;
1175 		else
1176 			ret = len;
1177 	} else
1178 		ret = len;
1179 
1180 	return ret;
1181 }
1182 
1183 static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
1184 			u8 *buf, int rx_byte, int pkt_size)
1185 {
1186 	u32 *lp, *temp, data;
1187 	int i, j = 0, cnt;
1188 	u32 read_cnt;
1189 	u8 reg[16];
1190 	int repeated_bytes = 0;
1191 	int buf_offset = buf - msm_host->rx_buf;
1192 
1193 	lp = (u32 *)buf;
1194 	temp = (u32 *)reg;
1195 	cnt = (rx_byte + 3) >> 2;
1196 	if (cnt > 4)
1197 		cnt = 4; /* 4 x 32 bits registers only */
1198 
1199 	if (rx_byte == 4)
1200 		read_cnt = 4;
1201 	else
1202 		read_cnt = pkt_size + 6;
1203 
1204 	/*
1205 	 * In case of multiple reads from the panel, after the first read
1206 	 * there is a possibility that some payload bytes repeat in the
1207 	 * RDBK_DATA registers, since every pass reads the panel's
1208 	 * parameters starting from the first byte. We need to skip the
1209 	 * repeated bytes and only append the new parameters to the rx buffer.
1210 	 */
1211 	if (read_cnt > 16) {
1212 		int bytes_shifted;
1213 		/* Any data more than 16 bytes will be shifted out.
1214 		 * The temp read buffer should already contain these bytes.
1215 		 * The remaining bytes in read buffer are the repeated bytes.
1216 		 */
1217 		bytes_shifted = read_cnt - 16;
1218 		repeated_bytes = buf_offset - bytes_shifted;
1219 	}
1220 
1221 	for (i = cnt - 1; i >= 0; i--) {
1222 		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
1223 		*temp++ = ntohl(data); /* to host byte order */
1224 		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
1225 	}
1226 
1227 	for (i = repeated_bytes; i < 16; i++)
1228 		buf[j++] = reg[i];
1229 
1230 	return j;
1231 }
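
/*
 * Worked example (illustrative): a 20 byte long read is fetched by the
 * caller in two passes. Pass 1 sets the max return size to 10 and reads
 * 16 bytes (4 byte header + 10 payload + 2 crc); read_cnt = 16, so
 * nothing is skipped. Pass 2 sets the max return size to 20, so
 * read_cnt = 26 while the FIFO only retains the last 16 bytes:
 * bytes_shifted = 10 and buf_offset = 14 (the header plus the first 10
 * payload bytes already consumed), hence repeated_bytes = 4 and the first
 * 4 bytes in RDBK_DATA, which duplicate payload already in the rx buffer,
 * are skipped.
 */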
1232 
1233 static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
1234 				const struct mipi_dsi_msg *msg)
1235 {
1236 	int len, ret;
1237 	int bllp_len = msm_host->mode->hdisplay *
1238 			dsi_get_bpp(msm_host->format) / 8;
1239 
1240 	len = dsi_cmd_dma_add(msm_host, msg);
1241 	if (len < 0) {
1242 		pr_err("%s: failed to add cmd type = 0x%x\n",
1243 			__func__,  msg->type);
1244 		return len;
1245 	}
1246 
1247 	/* for video mode, do not send cmds longer than
1248 	 * one pixel line, since the hardware only transmits
1249 	 * them during the BLLP.
1250 	 */
1251 	/* TODO: if the command is sent in LP mode, the bit rate is only
1252 	 * half of esc clk rate. In this case, if the video is already
1253 	 * actively streaming, we need to check more carefully if the
1254 	 * command can be fit into one BLLP.
1255 	 */
1256 	if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
1257 		pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
1258 			__func__, len);
1259 		return -EINVAL;
1260 	}
1261 
1262 	ret = dsi_cmd_dma_tx(msm_host, len);
1263 	if (ret < len) {
1264 		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
1265 			__func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
1266 		return -ECOMM;
1267 	}
1268 
1269 	return len;
1270 }
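
/*
 * Illustrative example of the BLLP limit above (assumed panel width): for
 * a 1080 pixel wide RGB888 mode, bllp_len = 1080 * 24 / 8 = 3240 bytes,
 * so any command whose padded DMA length exceeds 3240 bytes is rejected
 * in video mode.
 */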
1271 
1272 static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
1273 {
1274 	u32 data0, data1;
1275 
1276 	data0 = dsi_read(msm_host, REG_DSI_CTRL);
1277 	data1 = data0;
1278 	data1 &= ~DSI_CTRL_ENABLE;
1279 	dsi_write(msm_host, REG_DSI_CTRL, data1);
1280 	/*
1281 	 * the dsi controller needs to be disabled before the
1282 	 * clocks are turned on
1283 	 */
1284 	wmb();
1285 
1286 	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
1287 	wmb();	/* make sure clocks enabled */
1288 
1289 	/* dsi controller can only be reset while clocks are running */
1290 	dsi_write(msm_host, REG_DSI_RESET, 1);
1291 	wmb();	/* make sure reset happen */
1292 	dsi_write(msm_host, REG_DSI_RESET, 0);
1293 	wmb();	/* controller out of reset */
1294 	dsi_write(msm_host, REG_DSI_CTRL, data0);
1295 	wmb();	/* make sure dsi controller enabled again */
1296 }
1297 
1298 static void dsi_hpd_worker(struct work_struct *work)
1299 {
1300 	struct msm_dsi_host *msm_host =
1301 		container_of(work, struct msm_dsi_host, hpd_work);
1302 
1303 	drm_helper_hpd_irq_event(msm_host->dev);
1304 }
1305 
1306 static void dsi_err_worker(struct work_struct *work)
1307 {
1308 	struct msm_dsi_host *msm_host =
1309 		container_of(work, struct msm_dsi_host, err_work);
1310 	u32 status = msm_host->err_work_state;
1311 
1312 	pr_err_ratelimited("%s: status=%x\n", __func__, status);
1313 	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
1314 		dsi_sw_reset_restore(msm_host);
1315 
1316 	/* It is safe to clear here because error irq is disabled. */
1317 	msm_host->err_work_state = 0;
1318 
1319 	/* enable dsi error interrupt */
1320 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
1321 }
1322 
1323 static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
1324 {
1325 	u32 status;
1326 
1327 	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);
1328 
1329 	if (status) {
1330 		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
1331 		/* An extra write of 0 is needed to clear the error bits */
1332 		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
1333 		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
1334 	}
1335 }
1336 
1337 static void dsi_timeout_status(struct msm_dsi_host *msm_host)
1338 {
1339 	u32 status;
1340 
1341 	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);
1342 
1343 	if (status) {
1344 		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
1345 		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
1346 	}
1347 }
1348 
1349 static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
1350 {
1351 	u32 status;
1352 
1353 	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);
1354 
1355 	if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC |
1356 			DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC |
1357 			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL |
1358 			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 |
1359 			DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) {
1360 		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
1361 		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
1362 	}
1363 }
1364 
1365 static void dsi_fifo_status(struct msm_dsi_host *msm_host)
1366 {
1367 	u32 status;
1368 
1369 	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);
1370 
1371 	/* fifo underflow, overflow */
1372 	if (status) {
1373 		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
1374 		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
1375 		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
1376 			msm_host->err_work_state |=
1377 					DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
1378 	}
1379 }
1380 
1381 static void dsi_status(struct msm_dsi_host *msm_host)
1382 {
1383 	u32 status;
1384 
1385 	status = dsi_read(msm_host, REG_DSI_STATUS0);
1386 
1387 	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
1388 		dsi_write(msm_host, REG_DSI_STATUS0, status);
1389 		msm_host->err_work_state |=
1390 			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
1391 	}
1392 }
1393 
1394 static void dsi_clk_status(struct msm_dsi_host *msm_host)
1395 {
1396 	u32 status;
1397 
1398 	status = dsi_read(msm_host, REG_DSI_CLK_STATUS);
1399 
1400 	if (status & DSI_CLK_STATUS_PLL_UNLOCKED) {
1401 		dsi_write(msm_host, REG_DSI_CLK_STATUS, status);
1402 		msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED;
1403 	}
1404 }
1405 
1406 static void dsi_error(struct msm_dsi_host *msm_host)
1407 {
1408 	/* disable dsi error interrupt */
1409 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0);
1410 
1411 	dsi_clk_status(msm_host);
1412 	dsi_fifo_status(msm_host);
1413 	dsi_ack_err_status(msm_host);
1414 	dsi_timeout_status(msm_host);
1415 	dsi_status(msm_host);
1416 	dsi_dln0_phy_err(msm_host);
1417 
1418 	queue_work(msm_host->workqueue, &msm_host->err_work);
1419 }
1420 
1421 static irqreturn_t dsi_host_irq(int irq, void *ptr)
1422 {
1423 	struct msm_dsi_host *msm_host = ptr;
1424 	u32 isr;
1425 	unsigned long flags;
1426 
1427 	if (!msm_host->ctrl_base)
1428 		return IRQ_HANDLED;
1429 
1430 	spin_lock_irqsave(&msm_host->intr_lock, flags);
1431 	isr = dsi_read(msm_host, REG_DSI_INTR_CTRL);
1432 	dsi_write(msm_host, REG_DSI_INTR_CTRL, isr);
1433 	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
1434 
1435 	DBG("isr=0x%x, id=%d", isr, msm_host->id);
1436 
1437 	if (isr & DSI_IRQ_ERROR)
1438 		dsi_error(msm_host);
1439 
1440 	if (isr & DSI_IRQ_VIDEO_DONE)
1441 		complete(&msm_host->video_comp);
1442 
1443 	if (isr & DSI_IRQ_CMD_DMA_DONE)
1444 		complete(&msm_host->dma_comp);
1445 
1446 	return IRQ_HANDLED;
1447 }
1448 
1449 static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
1450 			struct device *panel_device)
1451 {
1452 	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
1453 							 "disp-enable",
1454 							 GPIOD_OUT_LOW);
1455 	if (IS_ERR(msm_host->disp_en_gpio)) {
1456 		DBG("cannot get disp-enable-gpios %ld",
1457 				PTR_ERR(msm_host->disp_en_gpio));
1458 		return PTR_ERR(msm_host->disp_en_gpio);
1459 	}
1460 
1461 	msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
1462 								GPIOD_IN);
1463 	if (IS_ERR(msm_host->te_gpio)) {
1464 		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
1465 		return PTR_ERR(msm_host->te_gpio);
1466 	}
1467 
1468 	return 0;
1469 }
1470 
1471 static int dsi_host_attach(struct mipi_dsi_host *host,
1472 					struct mipi_dsi_device *dsi)
1473 {
1474 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1475 	int ret;
1476 
1477 	if (dsi->lanes > msm_host->num_data_lanes)
1478 		return -EINVAL;
1479 
1480 	msm_host->channel = dsi->channel;
1481 	msm_host->lanes = dsi->lanes;
1482 	msm_host->format = dsi->format;
1483 	msm_host->mode_flags = dsi->mode_flags;
1484 
1485 	/* Some gpios defined in panel DT need to be controlled by host */
1486 	ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
1487 	if (ret)
1488 		return ret;
1489 
1490 	DBG("id=%d", msm_host->id);
1491 	if (msm_host->dev)
1492 		queue_work(msm_host->workqueue, &msm_host->hpd_work);
1493 
1494 	return 0;
1495 }
1496 
1497 static int dsi_host_detach(struct mipi_dsi_host *host,
1498 					struct mipi_dsi_device *dsi)
1499 {
1500 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1501 
1502 	msm_host->device_node = NULL;
1503 
1504 	DBG("id=%d", msm_host->id);
1505 	if (msm_host->dev)
1506 		queue_work(msm_host->workqueue, &msm_host->hpd_work);
1507 
1508 	return 0;
1509 }
1510 
1511 static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
1512 					const struct mipi_dsi_msg *msg)
1513 {
1514 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1515 	int ret;
1516 
1517 	if (!msg || !msm_host->power_on)
1518 		return -EINVAL;
1519 
1520 	mutex_lock(&msm_host->cmd_mutex);
1521 	ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg);
1522 	mutex_unlock(&msm_host->cmd_mutex);
1523 
1524 	return ret;
1525 }
1526 
1527 static const struct mipi_dsi_host_ops dsi_host_ops = {
1528 	.attach = dsi_host_attach,
1529 	.detach = dsi_host_detach,
1530 	.transfer = dsi_host_transfer,
1531 };
1532 
1533 /*
1534  * List of supported physical to logical lane mappings.
1535  * For example, the 2nd entry represents the following mapping:
1536  *
1537  * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
1538  */
1539 static const int supported_data_lane_swaps[][4] = {
1540 	{ 0, 1, 2, 3 },
1541 	{ 3, 0, 1, 2 },
1542 	{ 2, 3, 0, 1 },
1543 	{ 1, 2, 3, 0 },
1544 	{ 0, 3, 2, 1 },
1545 	{ 1, 0, 3, 2 },
1546 	{ 2, 1, 0, 3 },
1547 	{ 3, 2, 1, 0 },
1548 };
1549 
1550 static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
1551 				    struct device_node *ep)
1552 {
1553 	struct device *dev = &msm_host->pdev->dev;
1554 	struct property *prop;
1555 	u32 lane_map[4];
1556 	int ret, i, len, num_lanes;
1557 
1558 	prop = of_find_property(ep, "data-lanes", &len);
1559 	if (!prop) {
1560 		dev_dbg(dev, "failed to find data lane mapping\n");
1561 		return -EINVAL;
1562 	}
1563 
1564 	num_lanes = len / sizeof(u32);
1565 
1566 	if (num_lanes < 1 || num_lanes > 4) {
1567 		dev_err(dev, "bad number of data lanes\n");
1568 		return -EINVAL;
1569 	}
1570 
1571 	msm_host->num_data_lanes = num_lanes;
1572 
1573 	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
1574 					 num_lanes);
1575 	if (ret) {
1576 		dev_err(dev, "failed to read lane data\n");
1577 		return ret;
1578 	}
1579 
1580 	/*
1581 	 * compare DT specified physical-logical lane mappings with the ones
1582 	 * supported by hardware
1583 	 */
1584 	for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) {
1585 		const int *swap = supported_data_lane_swaps[i];
1586 		int j;
1587 
1588 		/*
1589 		 * the data-lanes array we get from DT has a logical->physical
1590 		 * mapping. The "data lane swap" register field represents
1591 		 * supported configurations in a physical->logical mapping.
1592 		 * Translate the DT mapping to what we understand and find a
1593 		 * configuration that works.
1594 		 */
1595 		for (j = 0; j < num_lanes; j++) {
1596 			if (lane_map[j] > 3)
1597 				dev_err(dev, "bad physical lane entry %u\n",
1598 					lane_map[j]);
1599 
1600 			if (swap[lane_map[j]] != j)
1601 				break;
1602 		}
1603 
1604 		if (j == num_lanes) {
1605 			msm_host->dlane_swap = i;
1606 			return 0;
1607 		}
1608 	}
1609 
1610 	return -EINVAL;
1611 }
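
/*
 * Illustrative DT example (assumed values): with
 *
 *   data-lanes = <1 2 3 0>;
 *
 * logical lane 0 sits on physical lane 1, 1 on 2, 2 on 3 and 3 on 0.
 * Inverting that gives the physical->logical mapping { 3, 0, 1, 2 },
 * which matches index 1 of supported_data_lane_swaps ("3012"), so
 * dlane_swap is set to 1.
 */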
1612 
1613 static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
1614 {
1615 	struct device *dev = &msm_host->pdev->dev;
1616 	struct device_node *np = dev->of_node;
1617 	struct device_node *endpoint, *device_node;
1618 	int ret;
1619 
1620 	/*
1621 	 * Get the endpoint of the output port of the DSI host. In our case,
1622 	 * this is the output port with reg = 1. Don't return an error if
1623 	 * the remote endpoint isn't defined. It's possible that there is
1624 	 * nothing connected to the dsi output.
1625 	 */
1626 	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
1627 	if (!endpoint) {
1628 		dev_dbg(dev, "%s: no endpoint\n", __func__);
1629 		return 0;
1630 	}
1631 
1632 	ret = dsi_host_parse_lane_data(msm_host, endpoint);
1633 	if (ret) {
1634 		dev_err(dev, "%s: invalid lane configuration %d\n",
1635 			__func__, ret);
1636 		goto err;
1637 	}
1638 
1639 	/* Get panel node from the output port's endpoint data */
1640 	device_node = of_graph_get_remote_port_parent(endpoint);
1641 	if (!device_node) {
1642 		dev_err(dev, "%s: no valid device\n", __func__);
1643 		ret = -ENODEV;
1644 		goto err;
1645 	}
1646 
1647 	msm_host->device_node = device_node;
1648 
1649 	if (of_property_read_bool(np, "syscon-sfpb")) {
1650 		msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
1651 					"syscon-sfpb");
1652 		if (IS_ERR(msm_host->sfpb)) {
1653 			dev_err(dev, "%s: failed to get sfpb regmap\n",
1654 				__func__);
1655 			ret = PTR_ERR(msm_host->sfpb);
1656 		}
1657 	}
1658 
1659 	of_node_put(device_node);
1660 
1661 err:
1662 	of_node_put(endpoint);
1663 
1664 	return ret;
1665 }
1666 
1667 static int dsi_host_get_id(struct msm_dsi_host *msm_host)
1668 {
1669 	struct platform_device *pdev = msm_host->pdev;
1670 	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
1671 	struct resource *res;
1672 	int i;
1673 
1674 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
1675 	if (!res)
1676 		return -EINVAL;
1677 
1678 	for (i = 0; i < cfg->num_dsi; i++) {
1679 		if (cfg->io_start[i] == res->start)
1680 			return i;
1681 	}
1682 
1683 	return -EINVAL;
1684 }
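
/*
 * Illustrative example (assumed addresses): if the config lists
 * io_start = { 0xfd922800, 0xfd922b00 } and this device's "dsi_ctrl"
 * resource starts at 0xfd922b00, the host id resolves to 1.
 */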
1685 
1686 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
1687 {
1688 	struct msm_dsi_host *msm_host = NULL;
1689 	struct platform_device *pdev = msm_dsi->pdev;
1690 	int ret;
1691 
1692 	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
1693 	if (!msm_host) {
1694 		pr_err("%s: FAILED: cannot alloc dsi host\n",
1695 		       __func__);
1696 		ret = -ENOMEM;
1697 		goto fail;
1698 	}
1699 
1700 	msm_host->pdev = pdev;
1701 
1702 	ret = dsi_host_parse_dt(msm_host);
1703 	if (ret) {
1704 		pr_err("%s: failed to parse dt\n", __func__);
1705 		goto fail;
1706 	}
1707 
1708 	msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL");
1709 	if (IS_ERR(msm_host->ctrl_base)) {
1710 		pr_err("%s: unable to map DSI ctrl base\n", __func__);
1711 		ret = PTR_ERR(msm_host->ctrl_base);
1712 		goto fail;
1713 	}
1714 
1715 	msm_host->cfg_hnd = dsi_get_config(msm_host);
1716 	if (!msm_host->cfg_hnd) {
1717 		ret = -EINVAL;
1718 		pr_err("%s: get config failed\n", __func__);
1719 		goto fail;
1720 	}
1721 
1722 	msm_host->id = dsi_host_get_id(msm_host);
1723 	if (msm_host->id < 0) {
1724 		ret = msm_host->id;
1725 		pr_err("%s: unable to identify DSI host index\n", __func__);
1726 		goto fail;
1727 	}
1728 
1729 	/* fixup base address by io offset */
1730 	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
1731 
1732 	ret = dsi_regulator_init(msm_host);
1733 	if (ret) {
1734 		pr_err("%s: regulator init failed\n", __func__);
1735 		goto fail;
1736 	}
1737 
1738 	ret = dsi_clk_init(msm_host);
1739 	if (ret) {
1740 		pr_err("%s: unable to initialize dsi clks\n", __func__);
1741 		goto fail;
1742 	}
1743 
1744 	msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
1745 	if (!msm_host->rx_buf) {
1746 		ret = -ENOMEM;
1747 		goto fail;
1748 	}
1749 
1750 	init_completion(&msm_host->dma_comp);
1751 	init_completion(&msm_host->video_comp);
1752 	mutex_init(&msm_host->dev_mutex);
1753 	mutex_init(&msm_host->cmd_mutex);
1754 	mutex_init(&msm_host->clk_mutex);
1755 	spin_lock_init(&msm_host->intr_lock);
1756 
1757 	/* setup workqueue */
1758 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
1759 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
1760 	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
1761 
1762 	msm_dsi->host = &msm_host->base;
1763 	msm_dsi->id = msm_host->id;
1764 
1765 	DBG("DSI Host %d initialized", msm_host->id);
1766 	return 0;
1767 
1768 fail:
1769 	return ret;
1770 }
1771 
1772 void msm_dsi_host_destroy(struct mipi_dsi_host *host)
1773 {
1774 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1775 
1776 	DBG("");
1777 	dsi_tx_buf_free(msm_host);
1778 	if (msm_host->workqueue) {
1779 		flush_workqueue(msm_host->workqueue);
1780 		destroy_workqueue(msm_host->workqueue);
1781 		msm_host->workqueue = NULL;
1782 	}
1783 
1784 	mutex_destroy(&msm_host->clk_mutex);
1785 	mutex_destroy(&msm_host->cmd_mutex);
1786 	mutex_destroy(&msm_host->dev_mutex);
1787 }
1788 
1789 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
1790 					struct drm_device *dev)
1791 {
1792 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1793 	struct platform_device *pdev = msm_host->pdev;
1794 	int ret;
1795 
1796 	msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1797 	if (!msm_host->irq) {
1798 		ret = -EINVAL;
1799 		dev_err(dev->dev, "failed to get irq\n");
1800 		return ret;
1801 	}
1802 
1803 	ret = devm_request_irq(&pdev->dev, msm_host->irq,
1804 			dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
1805 			"dsi_isr", msm_host);
1806 	if (ret < 0) {
1807 		dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
1808 				msm_host->irq, ret);
1809 		return ret;
1810 	}
1811 
1812 	msm_host->dev = dev;
1813 	ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
1814 	if (ret) {
1815 		pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
1816 		return ret;
1817 	}
1818 
1819 	return 0;
1820 }
1821 
1822 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
1823 {
1824 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1825 	int ret;
1826 
1827 	/* Register mipi dsi host */
1828 	if (!msm_host->registered) {
1829 		host->dev = &msm_host->pdev->dev;
1830 		host->ops = &dsi_host_ops;
1831 		ret = mipi_dsi_host_register(host);
1832 		if (ret)
1833 			return ret;
1834 
1835 		msm_host->registered = true;
1836 
1837 		/* If the panel driver has not been probed after host register,
1838 		 * we should defer the host's probe.
1839 		 * This makes sure the panel is connected when fbcon detects
1840 		 * the connector status and gets the proper display mode to
1841 		 * create the framebuffer.
1842 		 * Don't try to defer if there is nothing connected to the dsi
1843 		 * output.
1844 		 */
1845 		if (check_defer && msm_host->device_node) {
1846 			if (!of_drm_find_panel(msm_host->device_node))
1847 				if (!of_drm_find_bridge(msm_host->device_node))
1848 					return -EPROBE_DEFER;
1849 		}
1850 	}
1851 
1852 	return 0;
1853 }
1854 
1855 void msm_dsi_host_unregister(struct mipi_dsi_host *host)
1856 {
1857 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1858 
1859 	if (msm_host->registered) {
1860 		mipi_dsi_host_unregister(host);
1861 		host->dev = NULL;
1862 		host->ops = NULL;
1863 		msm_host->registered = false;
1864 	}
1865 }
1866 
1867 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
1868 				const struct mipi_dsi_msg *msg)
1869 {
1870 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1871 
1872 	/* TODO: make sure dsi_cmd_mdp is idle.
1873 	 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
1874 	 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed.
1875 	 * How to handle the old versions? Wait for mdp cmd done?
1876 	 */
1877 
1878 	/*
1879 	 * mdss interrupt is generated in mdp core clock domain
1880 	 * the mdss interrupt is generated in the mdp core clock domain, so
1881 	 * the mdp clock needs to be enabled to receive the dsi interrupt
1882 	dsi_clk_ctrl(msm_host, 1);
1883 
1884 	/* TODO: vote for bus bandwidth */
1885 
1886 	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1887 		dsi_set_tx_power_mode(0, msm_host);
1888 
1889 	msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL);
1890 	dsi_write(msm_host, REG_DSI_CTRL,
1891 		msm_host->dma_cmd_ctrl_restore |
1892 		DSI_CTRL_CMD_MODE_EN |
1893 		DSI_CTRL_ENABLE);
1894 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1);
1895 
1896 	return 0;
1897 }
1898 
1899 void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
1900 				const struct mipi_dsi_msg *msg)
1901 {
1902 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1903 
1904 	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
1905 	dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
1906 
1907 	if (!(msg->flags & MIPI_DSI_MSG_USE_LPM))
1908 		dsi_set_tx_power_mode(1, msm_host);
1909 
1910 	/* TODO: unvote for bus bandwidth */
1911 
1912 	dsi_clk_ctrl(msm_host, 0);
1913 }
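
/*
 * Illustrative sketch: the prepare/restore pair above is meant to bracket
 * the actual command transfer, e.g. (hypothetical caller):
 *
 *	ret = msm_dsi_host_xfer_prepare(host, msg);
 *	if (!ret)
 *		ret = msm_dsi_host_cmd_tx(host, msg);	(or cmd_rx for reads)
 *	msm_dsi_host_xfer_restore(host, msg);
 */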
1914 
1915 int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
1916 				const struct mipi_dsi_msg *msg)
1917 {
1918 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1919 
1920 	return dsi_cmds2buf_tx(msm_host, msg);
1921 }
1922 
1923 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
1924 				const struct mipi_dsi_msg *msg)
1925 {
1926 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
1927 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
1928 	int data_byte, rx_byte, dlen, end;
1929 	int short_response, diff, pkt_size, ret = 0;
1930 	char cmd;
1931 	int rlen = msg->rx_len;
1932 	u8 *buf;
1933 
1934 	if (rlen <= 2) {
1935 		short_response = 1;
1936 		pkt_size = rlen;
1937 		rx_byte = 4;
1938 	} else {
1939 		short_response = 0;
1940 		data_byte = 10;	/* first read */
1941 		if (rlen < data_byte)
1942 			pkt_size = rlen;
1943 		else
1944 			pkt_size = data_byte;
1945 		rx_byte = data_byte + 6; /* 4 header + 2 crc */
1946 	}
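
	/*
	 * Worked example of the sizing above: for rlen = 18 the first
	 * pass reads pkt_size = 10 payload bytes, i.e. rx_byte = 16,
	 * one full 16 byte RX FIFO (4 byte header + 10 data + 2 byte
	 * CRC); the remaining 8 bytes are fetched on a second pass.
	 */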
1947 
1948 	buf = msm_host->rx_buf;
1949 	end = 0;
1950 	while (!end) {
1951 		u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8};
1952 		struct mipi_dsi_msg max_pkt_size_msg = {
1953 			.channel = msg->channel,
1954 			.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
1955 			.tx_len = 2,
1956 			.tx_buf = tx,
1957 		};
1958 
1959 		DBG("rlen=%d pkt_size=%d rx_byte=%d",
1960 			rlen, pkt_size, rx_byte);
1961 
1962 		ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg);
1963 		if (ret < 2) {
1964 			pr_err("%s: Set max pkt size failed, %d\n",
1965 				__func__, ret);
1966 			return -EINVAL;
1967 		}
1968 
1969 		if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) &&
1970 			(cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) {
1971 			/* Clear the RDBK_DATA registers */
1972 			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL,
1973 					DSI_RDBK_DATA_CTRL_CLR);
1974 			wmb(); /* make sure the RDBK registers are cleared */
1975 			dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0);
1976 			wmb(); /* release cleared status before transfer */
1977 		}
1978 
1979 		ret = dsi_cmds2buf_tx(msm_host, msg);
1980 		if (ret < msg->tx_len) {
1981 			pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret);
1982 			return ret;
1983 		}
1984 
1985 		/*
1986 		 * Once the cmd_dma_done interrupt arrives, the return data
1987 		 * from the client is already stored in the RDBK_DATA
1988 		 * registers. Since the RX FIFO is 16 bytes, the DCS header
1989 		 * is kept on the first iteration only; on later iterations
1990 		 * it is lost as the data shifts through the registers.
1991 		 */
1992 		dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size);
1993 
1994 		if (dlen <= 0)
1995 			return 0;
1996 
1997 		if (short_response)
1998 			break;
1999 
2000 		if (rlen <= data_byte) {
2001 			diff = data_byte - rlen;
2002 			end = 1;
2003 		} else {
2004 			diff = 0;
2005 			rlen -= data_byte;
2006 		}
2007 
2008 		if (!end) {
2009 			dlen -= 2; /* 2 crc */
2010 			dlen -= diff;
2011 			buf += dlen;	/* next start position */
2012 			data_byte = 14;	/* NOT first read */
2013 			if (rlen < data_byte)
2014 				pkt_size += rlen;
2015 			else
2016 				pkt_size += data_byte;
2017 			DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff);
2018 		}
2019 	}
2020 
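	/*
	 * Continuing the rlen = 18 example: the first pass keeps
	 * dlen - 2 (CRC) - diff = 14 bytes (4 byte header + 10 data),
	 * advances buf by 14 and bumps pkt_size to 18, so the second
	 * pass returns the remaining 8 data bytes (the DCS header has
	 * been shifted out by then).
	 */
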
2021 	/*
2022 	 * For a single long read with requested rlen < 10, shift the start
2023 	 * of the rx data buffer past the bytes that were never updated:
2024 	 * e.g. for rlen = 4 only the last 4 of the 10 first-read bytes
2025 	 * are valid, so the payload starts at rx_buf + (10 - 4).
2026 	 */
2027 	if (pkt_size < 10 && !short_response)
2028 		buf = msm_host->rx_buf + (10 - rlen);
2029 	else
2030 		buf = msm_host->rx_buf;
2031 
2032 	cmd = buf[0];
2033 	switch (cmd) {
2034 	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
2035 		pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
2036 		ret = 0;
2037 		break;
2038 	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
2039 	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
2040 		ret = dsi_short_read1_resp(buf, msg);
2041 		break;
2042 	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
2043 	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
2044 		ret = dsi_short_read2_resp(buf, msg);
2045 		break;
2046 	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
2047 	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
2048 		ret = dsi_long_read_resp(buf, msg);
2049 		break;
2050 	default:
2051 		pr_warn("%s: Invalid response cmd\n", __func__);
2052 		ret = 0;
2053 	}
2054 
2055 	return ret;
2056 }
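
/*
 * Illustrative sketch (hypothetical caller): reading one byte of DCS
 * status through this path:
 *
 *	u8 data;
 *	struct mipi_dsi_msg msg = {
 *		.channel = 0,
 *		.type = MIPI_DSI_DCS_READ,
 *		.tx_len = 1,
 *		.tx_buf = (u8[]){ MIPI_DCS_GET_POWER_MODE },
 *		.rx_len = 1,
 *		.rx_buf = &data,
 *	};
 *
 *	ret = msm_dsi_host_cmd_rx(host, &msg);
 */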
2057 
2058 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
2059 				  u32 len)
2060 {
2061 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2062 
2063 	dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base);
2064 	dsi_write(msm_host, REG_DSI_DMA_LEN, len);
2065 	dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
2066 
2067 	/* Make sure trigger happens */
2068 	wmb();
2069 }
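
/*
 * Note: the DMA base and length registers must be programmed before the
 * TRIG_DMA write; per the comment above, the trailing wmb() makes sure the
 * trigger write is issued to the hardware before the caller goes on to
 * wait for the CMD_DMA_DONE interrupt.
 */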
2070 
2071 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
2072 	struct msm_dsi_pll *src_pll)
2073 {
2074 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2075 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
2076 	struct clk *byte_clk_provider, *pixel_clk_provider;
2077 	int ret;
2078 
2079 	ret = msm_dsi_pll_get_clk_provider(src_pll,
2080 				&byte_clk_provider, &pixel_clk_provider);
2081 	if (ret) {
2082 		pr_info("%s: can't get provider from pll, don't set parent\n",
2083 			__func__);
2084 		return 0;
2085 	}
2086 
2087 	ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
2088 	if (ret) {
2089 		pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
2090 			__func__, ret);
2091 		goto exit;
2092 	}
2093 
2094 	ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
2095 	if (ret) {
2096 		pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
2097 			__func__, ret);
2098 		goto exit;
2099 	}
2100 
2101 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
2102 		ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
2103 		if (ret) {
2104 			pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
2105 				__func__, ret);
2106 			goto exit;
2107 		}
2108 
2109 		ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
2110 		if (ret) {
2111 			pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
2112 				__func__, ret);
2113 			goto exit;
2114 		}
2115 	}
2116 
2117 exit:
2118 	return ret;
2119 }
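
/*
 * Resulting clock topology as configured above (sketch; V2-only links in
 * parentheses):
 *
 *	dsi pll -+- byte_clk_provider --> byte_clk_src (and esc_clk_src)
 *	         +- pixel_clk_provider -> pixel_clk_src (and dsi_clk_src)
 */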
2120 
2121 int msm_dsi_host_enable(struct mipi_dsi_host *host)
2122 {
2123 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2124 
2125 	dsi_op_mode_config(msm_host,
2126 		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true);
2127 
2128 	/* TODO: the clock should be turned off for command mode and only
2129 	 * turned on before MDP START.
2130 	 * Enable the code below once the MDP driver supports this.
2131 	 */
2132 	/* if (msm_panel->mode == MSM_DSI_CMD_MODE)
2133 		dsi_clk_ctrl(msm_host, 0); */
2134 
2135 	return 0;
2136 }
2137 
2138 int msm_dsi_host_disable(struct mipi_dsi_host *host)
2139 {
2140 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2141 
2142 	dsi_op_mode_config(msm_host,
2143 		!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
2144 
2145 	/* Even though INTF is disabled, the video engine keeps running and
2146 	 * blocks the cmd engine.
2147 	 * Do a software reset to stop the video engine so commands can be sent.
2148 	 */
2149 	dsi_sw_reset(msm_host);
2150 
2151 	return 0;
2152 }
2153 
2154 static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
2155 {
2156 	enum sfpb_ahb_arb_master_port_en en;
2157 
2158 	if (!msm_host->sfpb)
2159 		return;
2160 
2161 	en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE;
2162 
2163 	regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG,
2164 			SFPB_GPREG_MASTER_PORT_EN__MASK,
2165 			SFPB_GPREG_MASTER_PORT_EN(en));
2166 }
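
/*
 * Note: regmap_update_bits() performs a read-modify-write, so only the
 * SFPB_GPREG_MASTER_PORT_EN field is changed; the other GPREG bits are
 * preserved.
 */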
2167 
2168 int msm_dsi_host_power_on(struct mipi_dsi_host *host)
2169 {
2170 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2171 	u32 clk_pre = 0, clk_post = 0;
2172 	int ret = 0;
2173 
2174 	mutex_lock(&msm_host->dev_mutex);
2175 	if (msm_host->power_on) {
2176 		DBG("dsi host already on");
2177 		goto unlock_ret;
2178 	}
2179 
2180 	msm_dsi_sfpb_config(msm_host, true);
2181 
2182 	ret = dsi_calc_clk_rate(msm_host);
2183 	if (ret) {
2184 		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2185 		goto unlock_ret;
2186 	}
2187 
2188 	ret = dsi_host_regulator_enable(msm_host);
2189 	if (ret) {
2190 		pr_err("%s: failed to enable vregs, ret=%d\n",
2191 			__func__, ret);
2192 		goto unlock_ret;
2193 	}
2194 
2195 	ret = dsi_bus_clk_enable(msm_host);
2196 	if (ret) {
2197 		pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret);
2198 		goto fail_disable_reg;
2199 	}
2200 
2201 	dsi_phy_sw_reset(msm_host);
2202 	ret = msm_dsi_manager_phy_enable(msm_host->id,
2203 					msm_host->byte_clk_rate * 8,
2204 					msm_host->esc_clk_rate,
2205 					&clk_pre, &clk_post);
2206 	dsi_bus_clk_disable(msm_host);
2207 	if (ret) {
2208 		pr_err("%s: failed to enable phy, %d\n", __func__, ret);
2209 		goto fail_disable_reg;
2210 	}
2211 
2212 	ret = dsi_clk_ctrl(msm_host, 1);
2213 	if (ret) {
2214 		pr_err("%s: failed to enable clocks. ret=%d\n", __func__, ret);
2215 		goto fail_disable_reg;
2216 	}
2217 
2218 	ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev);
2219 	if (ret) {
2220 		pr_err("%s: failed to set pinctrl default state, %d\n",
2221 			__func__, ret);
2222 		goto fail_disable_clk;
2223 	}
2224 
2225 	dsi_timing_setup(msm_host);
2226 	dsi_sw_reset(msm_host);
2227 	dsi_ctrl_config(msm_host, true, clk_pre, clk_post);
2228 
2229 	if (msm_host->disp_en_gpio)
2230 		gpiod_set_value(msm_host->disp_en_gpio, 1);
2231 
2232 	msm_host->power_on = true;
2233 	mutex_unlock(&msm_host->dev_mutex);
2234 
2235 	return 0;
2236 
2237 fail_disable_clk:
2238 	dsi_clk_ctrl(msm_host, 0);
2239 fail_disable_reg:
2240 	dsi_host_regulator_disable(msm_host);
2241 unlock_ret:
2242 	mutex_unlock(&msm_host->dev_mutex);
2243 	return ret;
2244 }
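
/*
 * Illustrative call order (hypothetical caller): power and engine state
 * are managed in nested pairs:
 *
 *	msm_dsi_host_power_on(host);
 *	msm_dsi_host_enable(host);
 *	...
 *	msm_dsi_host_disable(host);
 *	msm_dsi_host_power_off(host);
 */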
2245 
2246 int msm_dsi_host_power_off(struct mipi_dsi_host *host)
2247 {
2248 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2249 
2250 	mutex_lock(&msm_host->dev_mutex);
2251 	if (!msm_host->power_on) {
2252 		DBG("dsi host already off");
2253 		goto unlock_ret;
2254 	}
2255 
2256 	dsi_ctrl_config(msm_host, false, 0, 0);
2257 
2258 	if (msm_host->disp_en_gpio)
2259 		gpiod_set_value(msm_host->disp_en_gpio, 0);
2260 
2261 	pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
2262 
2263 	msm_dsi_manager_phy_disable(msm_host->id);
2264 
2265 	dsi_clk_ctrl(msm_host, 0);
2266 
2267 	dsi_host_regulator_disable(msm_host);
2268 
2269 	msm_dsi_sfpb_config(msm_host, false);
2270 
2271 	DBG("-");
2272 
2273 	msm_host->power_on = false;
2274 
2275 unlock_ret:
2276 	mutex_unlock(&msm_host->dev_mutex);
2277 	return 0;
2278 }
2279 
2280 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2281 					struct drm_display_mode *mode)
2282 {
2283 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2284 
2285 	if (msm_host->mode) {
2286 		drm_mode_destroy(msm_host->dev, msm_host->mode);
2287 		msm_host->mode = NULL;
2288 	}
2289 
2290 	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
2291 	if (!msm_host->mode) {
2292 		pr_err("%s: cannot duplicate mode\n", __func__);
2293 		return -ENOMEM;
2294 	}
2295 
2296 	return 0;
2297 }
2298 
2299 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
2300 				unsigned long *panel_flags)
2301 {
2302 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2303 	struct drm_panel *panel;
2304 
2305 	panel = of_drm_find_panel(msm_host->device_node);
2306 	if (panel_flags)
2307 		*panel_flags = msm_host->mode_flags;
2308 
2309 	return panel;
2310 }
2311 
2312 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host)
2313 {
2314 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2315 
2316 	return of_drm_find_bridge(msm_host->device_node);
2317 }
2318