/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <video/mipi_display.h>

#include "dsi.h"
#include "dsi.xml.h"

/*
 * Controller version codes as read back from hardware by dsi_get_version().
 * V2 is the legacy core; 6G cores additionally expose a 32-bit minor
 * revision in a dedicated 6G_HW_VERSION register.
 */
#define MSM_DSI_VER_MAJOR_V2 0x02
#define MSM_DSI_VER_MAJOR_6G 0x03
#define MSM_DSI_6G_VER_MINOR_V1_0 0x10000000
#define MSM_DSI_6G_VER_MINOR_V1_1 0x10010000
#define MSM_DSI_6G_VER_MINOR_V1_1_1 0x10010001
#define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001

/* 6G cores place a HW_VERSION register at offset 0, shifting every other
 * register down by 4 bytes (see dsi_get_version()).
 */
#define DSI_6G_REG_SHIFT 4

/* Per-SoC controller description, matched against the version registers
 * at probe time by dsi_get_config().
 */
struct dsi_config {
	u32 major;		/* MSM_DSI_VER_MAJOR_* */
	u32 minor;		/* MSM_DSI_6G_VER_MINOR_* (0 for V2 cores) */
	u32 io_offset;		/* byte offset added to every register access */
	struct dsi_reg_config reg_cfg;	/* regulator set this core needs */
};

/* Table of known controllers.  Each regulator entry appears to be
 * {name, min_uV, max_uV, enable_load, disable_load} — TODO confirm against
 * struct dsi_reg_entry; -1 means "do not program" (see dsi_regulator_init()
 * and dsi_host_regulator_enable(), which skip negative fields).
 */
static const struct dsi_config dsi_cfgs[] = {
	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
	{ /* 8974 v1 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8974 v2 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8974 v3 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8084 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 3000000, 3000000, 150000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
	{ /* 8916 */
		.major = MSM_DSI_VER_MAJOR_6G,
		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
		.io_offset = DSI_6G_REG_SHIFT,
		.reg_cfg = {
			.num = 4,
			.regs = {
				{"gdsc", -1, -1, -1, -1},
				{"vdd", 2850000, 2850000, 100000, 100},
				{"vdda", 1200000, 1200000, 100000, 100},
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
	},
};

/*
 * dsi_get_version() - read the controller major/minor version from hardware
 * @base:  ioremapped controller register base
 * @major: out: MSM_DSI_VER_MAJOR_* value
 * @minor: out: 6G minor code, or 0 for legacy (V2) cores
 *
 * Returns 0 on success, -EINVAL on NULL out-pointers or an unrecognized
 * version.  Caller must have powered/clocked the block so register reads
 * are valid (see dsi_get_config()).
 */
static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
{
	u32 ver;
	u32 ver_6g;

	if (!major || !minor)
		return -EINVAL;

	/* From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0
	 * makes all other registers 4-byte shifted down.
	 */
	ver_6g = msm_readl(base + REG_DSI_6G_HW_VERSION);
	if (ver_6g == 0) {
		/* Offset 0 reads zero: no 6G_HW_VERSION register, so this is
		 * a legacy core with REG_DSI_VERSION at its unshifted address.
		 */
		ver = msm_readl(base + REG_DSI_VERSION);
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver <= MSM_DSI_VER_MAJOR_V2) {
			/* old versions */
			*major = ver;
			*minor = 0;
			return 0;
		} else {
			return -EINVAL;
		}
	} else {
		/* 6G core: all registers shifted down by 4 bytes */
		ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION);
		ver = FIELD(ver, DSI_VERSION_MAJOR);
		if (ver == MSM_DSI_VER_MAJOR_6G) {
			/* 6G version */
			*major = ver;
			*minor = ver_6g;
			return 0;
		} else {
			return -EINVAL;
		}
	}
}

/* Bits accumulated in msm_dsi_host::err_work_state and consumed by
 * dsi_err_worker().
 * NOTE(review): DSI_ERR_STATE_ACK is 0x0000, so it can never be set or
 * tested as a bit — confirm whether a non-zero value was intended.
 */
#define DSI_ERR_STATE_ACK 0x0000
#define DSI_ERR_STATE_TIMEOUT 0x0001
#define DSI_ERR_STATE_DLN0_PHY 0x0002
#define DSI_ERR_STATE_FIFO 0x0004
#define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008
#define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010
#define DSI_ERR_STATE_PLL_UNLOCKED 0x0020

/* Value written to REG_DSI_CLK_CTRL to force all internal clocks on */
#define DSI_CLK_CTRL_ENABLE_CLKS \
	(DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \
	DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \
	DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \
	DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK)

/* Per-controller driver state; embeds the mipi_dsi_host that panel
 * drivers attach to (see to_msm_dsi_host()).
 */
struct msm_dsi_host {
	struct mipi_dsi_host base;

	struct platform_device *pdev;
	struct drm_device *dev;

	int id;	/* controller index, used by msm_dsi_manager_* calls */

	void __iomem *ctrl_base;
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	/* Bus (always-on while active) and link (mode-dependent) clocks */
	struct clk *mdp_core_clk;
	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *mmss_misc_ahb_clk;
	struct clk *byte_clk;
	struct clk *esc_clk;
	struct clk *pixel_clk;
	struct clk *byte_clk_src;
	struct clk *pixel_clk_src;

	u32 byte_clk_rate;	/* Hz, computed in dsi_calc_clk_rate() */

	struct gpio_desc *disp_en_gpio;
	struct gpio_desc *te_gpio;

	const struct dsi_config *cfg;	/* matched entry from dsi_cfgs[] */

	struct completion dma_comp;	/* command DMA done */
	struct completion video_comp;	/* video frame done */
	struct mutex dev_mutex;
	struct mutex cmd_mutex;
	struct mutex clk_mutex;		/* serializes dsi_clk_ctrl() */
	spinlock_t intr_lock; /* Protect interrupt ctrl register */

	u32 err_work_state;	/* DSI_ERR_STATE_* bits for err_work */
	struct work_struct err_work;
	struct workqueue_struct *workqueue;

	struct drm_gem_object *tx_gem_obj;	/* command TX DMA buffer */
	u8 *rx_buf;				/* command RX buffer */

	struct drm_display_mode *mode;

	/* Panel info */
	struct device_node *panel_node;
	unsigned int channel;
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	unsigned long mode_flags;

	u32 dma_cmd_ctrl_restore;

	bool registered;
	bool power_on;
	int irq;
};

/* Bits per pixel on the link for a given MIPI pixel format;
 * unknown formats fall back to 24.
 */
static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt)
{
	switch (fmt) {
	case MIPI_DSI_FMT_RGB565:		return 16;
	case MIPI_DSI_FMT_RGB666_PACKED:	return 18;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB888:
	default:				return 24;
	}
}

/* Register accessors; cfg->io_offset folds in the 6G 4-byte shift */
static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg)
{
	return msm_readl(msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
}
static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data)
{
	msm_writel(data, msm_host->ctrl_base + msm_host->cfg->io_offset + reg);
}

static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host);
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host);

/*
 * dsi_get_config() - identify the controller and return its dsi_cfgs[] entry
 *
 * Temporarily powers the GDSC and AHB clock so the version registers can be
 * read, then releases both.  Returns NULL on any failure.
 */
static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
{
	const struct dsi_config *cfg;
	struct regulator *gdsc_reg;
	int i, ret;
	u32 major = 0, minor = 0;

	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
	if (IS_ERR(gdsc_reg)) {
		pr_err("%s: cannot get gdsc\n", __func__);
		goto fail;
	}
	ret = regulator_enable(gdsc_reg);
	if (ret) {
		pr_err("%s: unable to enable gdsc\n", __func__);
		regulator_put(gdsc_reg);
		goto fail;
	}
	ret = clk_prepare_enable(msm_host->ahb_clk);
	if (ret) {
		pr_err("%s: unable to enable ahb_clk\n", __func__);
		regulator_disable(gdsc_reg);
		regulator_put(gdsc_reg);
		goto fail;
	}

	ret = dsi_get_version(msm_host->ctrl_base, &major, &minor);

	/* Power/clock are only needed for the version read; drop them
	 * before inspecting the result.
	 */
	clk_disable_unprepare(msm_host->ahb_clk);
	regulator_disable(gdsc_reg);
	regulator_put(gdsc_reg);
	if (ret) {
		pr_err("%s: Invalid version\n", __func__);
		goto fail;
	}

	for (i = 0; i < ARRAY_SIZE(dsi_cfgs); i++) {
		cfg = dsi_cfgs + i;
		if ((cfg->major == major) && (cfg->minor == minor))
			return cfg;
	}
	pr_err("%s: Version %x:%x not support\n", __func__, major, minor);

fail:
	return NULL;
}

static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host)
{
	return container_of(host, struct msm_dsi_host, base);
}

/* Drop the load requests (in reverse order) and disable all supplies */
static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
	int num = msm_host->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer,
				regs[i].disable_load);

	regulator_bulk_disable(num, s);
}

/* Program each supply's active load (skipping negative entries) and enable
 * them all.  On failure, previously-set loads are rolled back to their
 * disable values.  Returns 0 or a negative regulator-core error.
 */
static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host)
{
	struct regulator_bulk_data *s = msm_host->supplies;
	const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs;
	int num = msm_host->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
						regs[i].enable_load);
			if (ret < 0) {
				pr_err("regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		pr_err("regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	/* NOTE(review): unlike dsi_host_regulator_disable(), this rollback
	 * does not check disable_load >= 0 — confirm regulator_set_load(-1)
	 * is harmless for entries like "gdsc".
	 */
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}

/* Fetch all supplies named in cfg->reg_cfg and apply voltage constraints
 * (skipping negative min/max entries).  Returns 0 or a negative error.
 */
static int dsi_regulator_init(struct
msm_dsi_host *msm_host) 355 { 356 struct regulator_bulk_data *s = msm_host->supplies; 357 const struct dsi_reg_entry *regs = msm_host->cfg->reg_cfg.regs; 358 int num = msm_host->cfg->reg_cfg.num; 359 int i, ret; 360 361 for (i = 0; i < num; i++) 362 s[i].supply = regs[i].name; 363 364 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s); 365 if (ret < 0) { 366 pr_err("%s: failed to init regulator, ret=%d\n", 367 __func__, ret); 368 return ret; 369 } 370 371 for (i = 0; i < num; i++) { 372 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) { 373 ret = regulator_set_voltage(s[i].consumer, 374 regs[i].min_voltage, regs[i].max_voltage); 375 if (ret < 0) { 376 pr_err("regulator %d set voltage failed, %d\n", 377 i, ret); 378 return ret; 379 } 380 } 381 } 382 383 return 0; 384 } 385 386 static int dsi_clk_init(struct msm_dsi_host *msm_host) 387 { 388 struct device *dev = &msm_host->pdev->dev; 389 int ret = 0; 390 391 msm_host->mdp_core_clk = devm_clk_get(dev, "mdp_core_clk"); 392 if (IS_ERR(msm_host->mdp_core_clk)) { 393 ret = PTR_ERR(msm_host->mdp_core_clk); 394 pr_err("%s: Unable to get mdp core clk. ret=%d\n", 395 __func__, ret); 396 goto exit; 397 } 398 399 msm_host->ahb_clk = devm_clk_get(dev, "iface_clk"); 400 if (IS_ERR(msm_host->ahb_clk)) { 401 ret = PTR_ERR(msm_host->ahb_clk); 402 pr_err("%s: Unable to get mdss ahb clk. ret=%d\n", 403 __func__, ret); 404 goto exit; 405 } 406 407 msm_host->axi_clk = devm_clk_get(dev, "bus_clk"); 408 if (IS_ERR(msm_host->axi_clk)) { 409 ret = PTR_ERR(msm_host->axi_clk); 410 pr_err("%s: Unable to get axi bus clk. ret=%d\n", 411 __func__, ret); 412 goto exit; 413 } 414 415 msm_host->mmss_misc_ahb_clk = devm_clk_get(dev, "core_mmss_clk"); 416 if (IS_ERR(msm_host->mmss_misc_ahb_clk)) { 417 ret = PTR_ERR(msm_host->mmss_misc_ahb_clk); 418 pr_err("%s: Unable to get mmss misc ahb clk. 
ret=%d\n", 419 __func__, ret); 420 goto exit; 421 } 422 423 msm_host->byte_clk = devm_clk_get(dev, "byte_clk"); 424 if (IS_ERR(msm_host->byte_clk)) { 425 ret = PTR_ERR(msm_host->byte_clk); 426 pr_err("%s: can't find dsi_byte_clk. ret=%d\n", 427 __func__, ret); 428 msm_host->byte_clk = NULL; 429 goto exit; 430 } 431 432 msm_host->pixel_clk = devm_clk_get(dev, "pixel_clk"); 433 if (IS_ERR(msm_host->pixel_clk)) { 434 ret = PTR_ERR(msm_host->pixel_clk); 435 pr_err("%s: can't find dsi_pixel_clk. ret=%d\n", 436 __func__, ret); 437 msm_host->pixel_clk = NULL; 438 goto exit; 439 } 440 441 msm_host->esc_clk = devm_clk_get(dev, "core_clk"); 442 if (IS_ERR(msm_host->esc_clk)) { 443 ret = PTR_ERR(msm_host->esc_clk); 444 pr_err("%s: can't find dsi_esc_clk. ret=%d\n", 445 __func__, ret); 446 msm_host->esc_clk = NULL; 447 goto exit; 448 } 449 450 msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src"); 451 if (IS_ERR(msm_host->byte_clk_src)) { 452 ret = PTR_ERR(msm_host->byte_clk_src); 453 pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret); 454 msm_host->byte_clk_src = NULL; 455 goto exit; 456 } 457 458 msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src"); 459 if (IS_ERR(msm_host->pixel_clk_src)) { 460 ret = PTR_ERR(msm_host->pixel_clk_src); 461 pr_err("%s: can't find pixel_clk_src. 
ret=%d\n", __func__, ret); 462 msm_host->pixel_clk_src = NULL; 463 goto exit; 464 } 465 466 exit: 467 return ret; 468 } 469 470 static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) 471 { 472 int ret; 473 474 DBG("id=%d", msm_host->id); 475 476 ret = clk_prepare_enable(msm_host->mdp_core_clk); 477 if (ret) { 478 pr_err("%s: failed to enable mdp_core_clock, %d\n", 479 __func__, ret); 480 goto core_clk_err; 481 } 482 483 ret = clk_prepare_enable(msm_host->ahb_clk); 484 if (ret) { 485 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret); 486 goto ahb_clk_err; 487 } 488 489 ret = clk_prepare_enable(msm_host->axi_clk); 490 if (ret) { 491 pr_err("%s: failed to enable ahb clock, %d\n", __func__, ret); 492 goto axi_clk_err; 493 } 494 495 ret = clk_prepare_enable(msm_host->mmss_misc_ahb_clk); 496 if (ret) { 497 pr_err("%s: failed to enable mmss misc ahb clk, %d\n", 498 __func__, ret); 499 goto misc_ahb_clk_err; 500 } 501 502 return 0; 503 504 misc_ahb_clk_err: 505 clk_disable_unprepare(msm_host->axi_clk); 506 axi_clk_err: 507 clk_disable_unprepare(msm_host->ahb_clk); 508 ahb_clk_err: 509 clk_disable_unprepare(msm_host->mdp_core_clk); 510 core_clk_err: 511 return ret; 512 } 513 514 static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) 515 { 516 DBG(""); 517 clk_disable_unprepare(msm_host->mmss_misc_ahb_clk); 518 clk_disable_unprepare(msm_host->axi_clk); 519 clk_disable_unprepare(msm_host->ahb_clk); 520 clk_disable_unprepare(msm_host->mdp_core_clk); 521 } 522 523 static int dsi_link_clk_enable(struct msm_dsi_host *msm_host) 524 { 525 int ret; 526 527 DBG("Set clk rates: pclk=%d, byteclk=%d", 528 msm_host->mode->clock, msm_host->byte_clk_rate); 529 530 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); 531 if (ret) { 532 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); 533 goto error; 534 } 535 536 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000); 537 if (ret) { 538 pr_err("%s: Failed to set rate pixel 
clk, %d\n", __func__, ret);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->esc_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi esc clk\n", __func__);
		goto error;
	}

	ret = clk_prepare_enable(msm_host->byte_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi byte clk\n", __func__);
		goto byte_clk_err;
	}

	ret = clk_prepare_enable(msm_host->pixel_clk);
	if (ret) {
		pr_err("%s: Failed to enable dsi pixel clk\n", __func__);
		goto pixel_clk_err;
	}

	return 0;

pixel_clk_err:
	clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
	clk_disable_unprepare(msm_host->esc_clk);
error:
	return ret;
}

/* NOTE(review): disable order (esc, pixel, byte) is not the exact reverse
 * of the enable order (esc, byte, pixel) — confirm this is intentional.
 */
static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
{
	clk_disable_unprepare(msm_host->esc_clk);
	clk_disable_unprepare(msm_host->pixel_clk);
	clk_disable_unprepare(msm_host->byte_clk);
}

/* Enable or disable both bus and link clocks, serialized by clk_mutex.
 * On enable, link-clock failure rolls the bus clocks back.  Returns 0 or
 * a negative error (disable path cannot fail).
 */
static int dsi_clk_ctrl(struct msm_dsi_host *msm_host, bool enable)
{
	int ret = 0;

	mutex_lock(&msm_host->clk_mutex);
	if (enable) {
		ret = dsi_bus_clk_enable(msm_host);
		if (ret) {
			pr_err("%s: Can not enable bus clk, %d\n",
				__func__, ret);
			goto unlock_ret;
		}
		ret = dsi_link_clk_enable(msm_host);
		if (ret) {
			pr_err("%s: Can not enable link clk, %d\n",
				__func__, ret);
			dsi_bus_clk_disable(msm_host);
			goto unlock_ret;
		}
	} else {
		dsi_link_clk_disable(msm_host);
		dsi_bus_clk_disable(msm_host);
	}

unlock_ret:
	mutex_unlock(&msm_host->clk_mutex);
	return ret;
}

/* Derive byte_clk_rate from the current mode: pclk * bpp / (8 * lanes).
 * NOTE(review): pclk_rate * bpp is a 32-bit multiply; for very high pixel
 * clocks this could overflow u32 — confirm the supported mode range.
 */
static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
{
	struct drm_display_mode *mode = msm_host->mode;
	u8 lanes = msm_host->lanes;
	u32 bpp = dsi_get_bpp(msm_host->format);
	u32 pclk_rate;

	if (!mode) {
		pr_err("%s: mode not set\n", __func__);
		return -EINVAL;
	}

	pclk_rate = mode->clock * 1000;
	if (lanes > 0) {
		msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
	} else {
		pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
		msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
	}

	DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);

	return 0;
}

/* Pulse the PHY reset bit with settle delays */
static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host)
{
	DBG("");
	dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET);
	/* Make sure fully reset */
	wmb();
	udelay(1000);
	dsi_write(msm_host, REG_DSI_PHY_RESET, 0);
	udelay(100);
}

/* Set or clear bits in the interrupt control register under intr_lock */
static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable)
{
	u32 intr;
	unsigned long flags;

	spin_lock_irqsave(&msm_host->intr_lock, flags);
	intr = dsi_read(msm_host, REG_DSI_INTR_CTRL);

	if (enable)
		intr |= mask;
	else
		intr &= ~mask;

	DBG("intr=%x enable=%d", intr, enable);

	dsi_write(msm_host, REG_DSI_INTR_CTRL, intr);
	spin_unlock_irqrestore(&msm_host->intr_lock, flags);
}

/* Map DSI mode flags to the hardware video traffic mode */
static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags)
{
	if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
		return BURST_MODE;
	else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		return NON_BURST_SYNCH_PULSE;

	return NON_BURST_SYNCH_EVENT;
}

/* Map MIPI pixel format to video-mode destination format */
static inline enum dsi_vid_dst_format dsi_get_vid_fmt(
				const enum mipi_dsi_pixel_format mipi_fmt)
{
	switch (mipi_fmt) {
	case MIPI_DSI_FMT_RGB888:	return VID_DST_FORMAT_RGB888;
	case MIPI_DSI_FMT_RGB666:	return VID_DST_FORMAT_RGB666_LOOSE;
	case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666;
	case MIPI_DSI_FMT_RGB565:	return VID_DST_FORMAT_RGB565;
	default:			return VID_DST_FORMAT_RGB888;
	}
}

/* Map MIPI pixel format to command-mode destination format */
static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
				const enum mipi_dsi_pixel_format mipi_fmt)
{
	switch (mipi_fmt) {
	case MIPI_DSI_FMT_RGB888:	return CMD_DST_FORMAT_RGB888;
	case MIPI_DSI_FMT_RGB666_PACKED:
case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; 690 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 691 default: return CMD_DST_FORMAT_RGB888; 692 } 693 } 694 695 static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, 696 u32 clk_pre, u32 clk_post) 697 { 698 u32 flags = msm_host->mode_flags; 699 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; 700 u32 data = 0; 701 702 if (!enable) { 703 dsi_write(msm_host, REG_DSI_CTRL, 0); 704 return; 705 } 706 707 if (flags & MIPI_DSI_MODE_VIDEO) { 708 if (flags & MIPI_DSI_MODE_VIDEO_HSE) 709 data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE; 710 if (flags & MIPI_DSI_MODE_VIDEO_HFP) 711 data |= DSI_VID_CFG0_HFP_POWER_STOP; 712 if (flags & MIPI_DSI_MODE_VIDEO_HBP) 713 data |= DSI_VID_CFG0_HBP_POWER_STOP; 714 if (flags & MIPI_DSI_MODE_VIDEO_HSA) 715 data |= DSI_VID_CFG0_HSA_POWER_STOP; 716 /* Always set low power stop mode for BLLP 717 * to let command engine send packets 718 */ 719 data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP | 720 DSI_VID_CFG0_BLLP_POWER_STOP; 721 data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); 722 data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); 723 data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); 724 dsi_write(msm_host, REG_DSI_VID_CFG0, data); 725 726 /* Do not swap RGB colors */ 727 data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB); 728 dsi_write(msm_host, REG_DSI_VID_CFG1, 0); 729 } else { 730 /* Do not swap RGB colors */ 731 data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB); 732 data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt)); 733 dsi_write(msm_host, REG_DSI_CMD_CFG0, data); 734 735 data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) | 736 DSI_CMD_CFG1_WR_MEM_CONTINUE( 737 MIPI_DCS_WRITE_MEMORY_CONTINUE); 738 /* Always insert DCS command */ 739 data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND; 740 dsi_write(msm_host, REG_DSI_CMD_CFG1, data); 741 } 742 743 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, 744 DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER | 745 
DSI_CMD_DMA_CTRL_LOW_POWER); 746 747 data = 0; 748 /* Always assume dedicated TE pin */ 749 data |= DSI_TRIG_CTRL_TE; 750 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); 751 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); 752 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); 753 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && 754 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) 755 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; 756 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); 757 758 data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) | 759 DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre); 760 dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); 761 762 data = 0; 763 if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) 764 data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; 765 dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data); 766 767 /* allow only ack-err-status to generate interrupt */ 768 dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0); 769 770 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); 771 772 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); 773 774 data = DSI_CTRL_CLK_EN; 775 776 DBG("lane number=%d", msm_host->lanes); 777 if (msm_host->lanes == 2) { 778 data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2; 779 /* swap lanes for 2-lane panel for better performance */ 780 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, 781 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230)); 782 } else { 783 /* Take 4 lanes as default */ 784 data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 | 785 DSI_CTRL_LANE3; 786 /* Do not swap lanes for 4-lane panel */ 787 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, 788 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123)); 789 } 790 791 if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) 792 dsi_write(msm_host, REG_DSI_LANE_CTRL, 793 DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); 794 795 data |= DSI_CTRL_ENABLE; 796 797 dsi_write(msm_host, REG_DSI_CTRL, data); 798 } 799 800 static void dsi_timing_setup(struct msm_dsi_host *msm_host) 801 { 802 struct 
drm_display_mode *mode = msm_host->mode;
	u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
	u32 h_total = mode->htotal;
	u32 v_total = mode->vtotal;
	u32 hs_end = mode->hsync_end - mode->hsync_start;
	u32 vs_end = mode->vsync_end - mode->vsync_start;
	u32 ha_start = h_total - mode->hsync_start;
	u32 ha_end = ha_start + mode->hdisplay;
	u32 va_start = v_total - mode->vsync_start;
	u32 va_end = va_start + mode->vdisplay;
	u32 wc;

	DBG("");

	if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
		dsi_write(msm_host, REG_DSI_ACTIVE_H,
			DSI_ACTIVE_H_START(ha_start) |
			DSI_ACTIVE_H_END(ha_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_V,
			DSI_ACTIVE_V_START(va_start) |
			DSI_ACTIVE_V_END(va_end));
		dsi_write(msm_host, REG_DSI_TOTAL,
			DSI_TOTAL_H_TOTAL(h_total - 1) |
			DSI_TOTAL_V_TOTAL(v_total - 1));

		dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC,
			DSI_ACTIVE_HSYNC_START(hs_start) |
			DSI_ACTIVE_HSYNC_END(hs_end));
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0);
		dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS,
			DSI_ACTIVE_VSYNC_VPOS_START(vs_start) |
			DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
	} else {		/* command mode */
		/* image data and 1 byte write_memory_start cmd */
		wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
			DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
			DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL(
					msm_host->channel) |
			DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE(
					MIPI_DSI_DCS_LONG_WRITE));

		dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
			DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
			DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
	}
}

/* Soft-reset the controller; internal clocks must be forced on first */
static void dsi_sw_reset(struct msm_dsi_host *msm_host)
{
	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb(); /* clocks need to be enabled before reset */

	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb(); /* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
}

/* Enable/disable the video or command engine.  On disable, both engine
 * enables and their done-interrupts are cleared.
 */
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
					bool video_mode, bool enable)
{
	u32 dsi_ctrl;

	dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL);

	if (!enable) {
		dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN |
				DSI_CTRL_CMD_MODE_EN);
		dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE |
				DSI_IRQ_MASK_VIDEO_DONE, 0);
	} else {
		if (video_mode) {
			dsi_ctrl |= DSI_CTRL_VID_MODE_EN;
		} else {		/* command mode */
			dsi_ctrl |= DSI_CTRL_CMD_MODE_EN;
			dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1);
		}
		dsi_ctrl |= DSI_CTRL_ENABLE;
	}

	dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl);
}

/* mode == 0: send DMA commands in HS; otherwise in LP */
static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
{
	u32 data;

	data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL);

	if (mode == 0)
		data &= ~DSI_CMD_DMA_CTRL_LOW_POWER;
	else
		data |= DSI_CMD_DMA_CTRL_LOW_POWER;

	dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data);
}

/* Wait (up to 70ms) for the next video-frame-done interrupt */
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);

	reinit_completion(&msm_host->video_comp);

	wait_for_completion_timeout(&msm_host->video_comp,
			msecs_to_jiffies(70));

	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}

/* In active video mode, wait until the engine is past the BLLP so a
 * command DMA can be triggered safely; no-op otherwise.
 */
static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
{
	if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
		return;

	if (msm_host->power_on) {
		dsi_wait4video_done(msm_host);
		/* delay 4 ms to skip BLLP */
		usleep_range(2000, 4000);
	}
}

/* dsi_cmd */
/* Allocate the GEM buffer used for command TX DMA and verify its iova is
 * 8-byte aligned.  Returns 0 or a negative error.
 */
static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
	struct drm_device *dev = msm_host->dev;
	int ret;
	u32 iova;

	mutex_lock(&dev->struct_mutex);
	msm_host->tx_gem_obj =
msm_gem_new(dev, size, MSM_BO_UNCACHED); 933 if (IS_ERR(msm_host->tx_gem_obj)) { 934 ret = PTR_ERR(msm_host->tx_gem_obj); 935 pr_err("%s: failed to allocate gem, %d\n", __func__, ret); 936 msm_host->tx_gem_obj = NULL; 937 mutex_unlock(&dev->struct_mutex); 938 return ret; 939 } 940 941 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); 942 if (ret) { 943 pr_err("%s: failed to get iova, %d\n", __func__, ret); 944 return ret; 945 } 946 mutex_unlock(&dev->struct_mutex); 947 948 if (iova & 0x07) { 949 pr_err("%s: buf NOT 8 bytes aligned\n", __func__); 950 return -EINVAL; 951 } 952 953 return 0; 954 } 955 956 static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) 957 { 958 struct drm_device *dev = msm_host->dev; 959 960 if (msm_host->tx_gem_obj) { 961 msm_gem_put_iova(msm_host->tx_gem_obj, 0); 962 mutex_lock(&dev->struct_mutex); 963 msm_gem_free_object(msm_host->tx_gem_obj); 964 msm_host->tx_gem_obj = NULL; 965 mutex_unlock(&dev->struct_mutex); 966 } 967 } 968 969 /* 970 * prepare cmd buffer to be txed 971 */ 972 static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem, 973 const struct mipi_dsi_msg *msg) 974 { 975 struct mipi_dsi_packet packet; 976 int len; 977 int ret; 978 u8 *data; 979 980 ret = mipi_dsi_create_packet(&packet, msg); 981 if (ret) { 982 pr_err("%s: create packet failed, %d\n", __func__, ret); 983 return ret; 984 } 985 len = (packet.size + 3) & (~0x3); 986 987 if (len > tx_gem->size) { 988 pr_err("%s: packet size is too big\n", __func__); 989 return -EINVAL; 990 } 991 992 data = msm_gem_vaddr(tx_gem); 993 994 if (IS_ERR(data)) { 995 ret = PTR_ERR(data); 996 pr_err("%s: get vaddr failed, %d\n", __func__, ret); 997 return ret; 998 } 999 1000 /* MSM specific command format in memory */ 1001 data[0] = packet.header[1]; 1002 data[1] = packet.header[2]; 1003 data[2] = packet.header[0]; 1004 data[3] = BIT(7); /* Last packet */ 1005 if (mipi_dsi_packet_format_is_long(msg->type)) 1006 data[3] |= BIT(6); 1007 if (msg->rx_buf && msg->rx_len) 1008 
	data[3] |= BIT(5);

	/* Long packet */
	if (packet.payload && packet.payload_length)
		memcpy(data + 4, packet.payload, packet.payload_length);

	/* Append 0xff to the end */
	if (packet.size < len)
		memset(data + packet.size, 0xff, len - packet.size);

	return len;
}

/*
 * dsi_short_read1_resp: 1 parameter
 *
 * Copy the single payload byte of a short-read response into
 * msg->rx_buf.  buf[0] is the DCS response type and is stripped;
 * the parameter byte lives at buf[1].
 *
 * Returns the number of payload bytes copied (1), or -EINVAL when
 * the caller supplied no rx_buf or an rx_len of 0.
 */
static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	u8 *data = msg->rx_buf;
	if (data && (msg->rx_len >= 1)) {
		*data = buf[1]; /* strip out dcs type */
		return 1;
	} else {
		pr_err("%s: read data does not match with rx_buf len %zu\n",
			__func__, msg->rx_len);
		return -EINVAL;
	}
}

/*
 * dsi_short_read2_resp: 2 parameter
 *
 * Same as dsi_short_read1_resp() but for the two-byte short-read
 * response: payload bytes are at buf[1] and buf[2].
 *
 * Returns 2 on success, -EINVAL when rx_buf is missing or too small.
 */
static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	u8 *data = msg->rx_buf;
	if (data && (msg->rx_len >= 2)) {
		data[0] = buf[1]; /* strip out dcs type */
		data[1] = buf[2];
		return 2;
	} else {
		pr_err("%s: read data does not match with rx_buf len %zu\n",
			__func__, msg->rx_len);
		return -EINVAL;
	}
}

/*
 * dsi_long_read_resp: copy a long-read response payload to msg->rx_buf.
 *
 * The caller guarantees buf holds at least msg->rx_len payload bytes
 * after the 4 byte header.  Returns the number of bytes copied
 * (msg->rx_len).
 */
static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
{
	/* strip out 4 byte dcs header */
	if (msg->rx_buf && msg->rx_len)
		memcpy(msg->rx_buf, buf + 4, msg->rx_len);

	return msg->rx_len;
}

/*
 * dsi_cmd_dma_tx: kick a command DMA transfer of @len bytes from the
 * tx_gem buffer and wait for the CMD_DMA_DONE completion.
 *
 * The actual trigger is delegated to the DSI manager so that, in
 * dual-DSI configurations, the transfer can be triggered on the
 * proper controller; when the manager reports the trigger did not
 * happen on this host (broadcast handled elsewhere) we do not wait.
 *
 * Returns @len on success, -ETIMEDOUT when the DMA-done interrupt
 * does not arrive within 200 ms, or a negative error from
 * msm_gem_get_iova().
 */
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
	int ret;
	u32 iova;
	bool triggered;

	ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
	if (ret) {
		pr_err("%s: failed to get iova: %d\n", __func__, ret);
		return ret;
	}

	reinit_completion(&msm_host->dma_comp);

	/* in video mode, wait until the video engine is in BLLP
	 * before triggering the command DMA
	 */
	dsi_wait4video_eng_busy(msm_host);

	triggered = msm_dsi_manager_cmd_xfer_trigger(
						msm_host->id, iova, len);
	if (triggered) {
		/* wait_for_completion_timeout() returns 0 on timeout,
		 * otherwise the remaining jiffies (> 0)
		 */
		ret = wait_for_completion_timeout(&msm_host->dma_comp,
					msecs_to_jiffies(200));
		DBG("ret=%d", ret);
		if (ret == 0)
			ret = -ETIMEDOUT;
		else
			ret = len;
	} else
		ret = len;

	return ret;
}

/*
 * dsi_cmd_dma_rx: drain the 16-byte RDBK_DATA read-back FIFO into @buf.
 *
 * @rx_byte:  total bytes expected for this pass (4 for short reads,
 *            payload + 6 for long reads).
 * @pkt_size: max return packet size programmed for this pass; used to
 *            compute how many bytes the hardware has shifted through
 *            the FIFO on multi-pass reads.
 *
 * Returns the number of bytes stored into @buf.
 */
static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
			u8 *buf, int rx_byte, int pkt_size)
{
	/* NOTE(review): lp is assigned but never read below — looks like
	 * leftover from a previous implementation; verify before removing.
	 */
	u32 *lp, *temp, data;
	int i, j = 0, cnt;
	u32 read_cnt;
	u8 reg[16];	/* staging copy of the 4 RDBK_DATA registers */
	int repeated_bytes = 0;
	int buf_offset = buf - msm_host->rx_buf;

	lp = (u32 *)buf;
	temp = (u32 *)reg;
	cnt = (rx_byte + 3) >> 2;
	if (cnt > 4)
		cnt = 4; /* 4 x 32 bits registers only */

	if (rx_byte == 4)
		read_cnt = 4;
	else
		read_cnt = pkt_size + 6;

	/*
	 * In case of multiple reads from the panel, after the first read, there
	 * is possibility that there are some bytes in the payload repeating in
	 * the RDBK_DATA registers. Since we read all the parameters from the
	 * panel right from the first byte for every pass. We need to skip the
	 * repeating bytes and then append the new parameters to the rx buffer.
	 */
	if (read_cnt > 16) {
		int bytes_shifted;
		/* Any data more than 16 bytes will be shifted out.
		 * The temp read buffer should already contain these bytes.
		 * The remaining bytes in read buffer are the repeated bytes.
		 */
		bytes_shifted = read_cnt - 16;
		repeated_bytes = buf_offset - bytes_shifted;
	}

	/* registers are read highest-index first so that reg[] ends up
	 * in transmission (byte-stream) order
	 */
	for (i = cnt - 1; i >= 0; i--) {
		data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i));
		*temp++ = ntohl(data); /* to host byte order */
		DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data));
	}

	/* append only the non-repeated tail of the FIFO to the caller's
	 * position in the rx buffer
	 */
	for (i = repeated_bytes; i < 16; i++)
		buf[j++] = reg[i];

	return j;
}

/*
 * dsi_cmds2buf_tx: pack @msg into the tx_gem DMA buffer and send it.
 *
 * Returns the packet length on success, -EINVAL when the packet cannot
 * be built or (in video mode) does not fit into one BLLP, or -ECOMM
 * when the DMA transfer fails or times out.
 */
static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host,
			const struct mipi_dsi_msg *msg)
{
	int len, ret;
	/* bytes available in one blanking (BLLP) period: one pixel line */
	int bllp_len = msm_host->mode->hdisplay *
			dsi_get_bpp(msm_host->format) / 8;

	len = dsi_cmd_dma_add(msm_host->tx_gem_obj, msg);
	if (!len) {
		pr_err("%s: failed to add cmd type = 0x%x\n",
			__func__, msg->type);
		return -EINVAL;
	}

	/* for video mode, do not send cmds more than
	 * one pixel line, since it only transmit it
	 * during BLLP.
	 */
	/* TODO: if the command is sent in LP mode, the bit rate is only
	 * half of esc clk rate. In this case, if the video is already
	 * actively streaming, we need to check more carefully if the
	 * command can be fit into one BLLP.
	 */
	if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) {
		pr_err("%s: cmd cannot fit into BLLP period, len=%d\n",
			__func__, len);
		return -EINVAL;
	}

	ret = dsi_cmd_dma_tx(msm_host, len);
	if (ret < len) {
		pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n",
			__func__, msg->type, (*(u8 *)(msg->tx_buf)), len);
		return -ECOMM;
	}

	return len;
}

/*
 * dsi_sw_reset_restore: reset the DSI controller and restore DSI_CTRL.
 *
 * Used from the error worker to recover from MDP FIFO underflow.  The
 * hardware requires: controller disabled before clocks are forced on,
 * clocks running while RESET is pulsed, then the saved DSI_CTRL value
 * is written back.  The wmb()s enforce that register ordering.
 */
static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
{
	u32 data0, data1;

	data0 = dsi_read(msm_host, REG_DSI_CTRL);
	data1 = data0;
	data1 &= ~DSI_CTRL_ENABLE;
	dsi_write(msm_host, REG_DSI_CTRL, data1);
	/*
	 * dsi controller need to be disabled before
	 * clocks turned on
	 */
	wmb();

	dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS);
	wmb();	/* make sure clocks enabled */

	/* dsi controller can only be reset while clocks are running */
	dsi_write(msm_host, REG_DSI_RESET, 1);
	wmb();	/* make sure reset happen */
	dsi_write(msm_host, REG_DSI_RESET, 0);
	wmb();	/* controller out of reset */
	dsi_write(msm_host, REG_DSI_CTRL, data0);
	wmb();	/* make sure dsi controller enabled again */
}

/*
 * dsi_err_worker: deferred handler for DSI error interrupts.
 *
 * Runs on msm_host->workqueue.  The error interrupt was disabled by
 * dsi_error() before this work was queued, so err_work_state can be
 * read and cleared here without racing the ISR.
 */
static void dsi_err_worker(struct work_struct *work)
{
	struct msm_dsi_host *msm_host =
		container_of(work, struct msm_dsi_host, err_work);
	u32 status = msm_host->err_work_state;

	pr_err_ratelimited("%s: status=%x\n", __func__, status);
	if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
		dsi_sw_reset_restore(msm_host);

	/* It is safe to clear here because error irq is disabled. */
	msm_host->err_work_state = 0;

	/* enable dsi error interrupt */
	dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1);
}

/* Latch-and-clear ACK error status; record it for the error worker. */
static void dsi_ack_err_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status);
		/* Writing of an extra 0 needed to clear error bits */
		dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0);
		msm_host->err_work_state |= DSI_ERR_STATE_ACK;
	}
}

/* Latch-and-clear timeout status; record it for the error worker. */
static void dsi_timeout_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS);

	if (status) {
		dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT;
	}
}

/* Latch-and-clear data-lane 0 PHY errors; record for the error worker. */
static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR);

	if (status) {
		dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status);
		msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY;
	}
}

/*
 * Latch-and-clear FIFO status.  MDP FIFO underflow is flagged
 * separately because the error worker must do a full controller
 * reset to recover from it.
 */
static void dsi_fifo_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_FIFO_STATUS);

	/* fifo underflow, overflow */
	if (status) {
		dsi_write(msm_host, REG_DSI_FIFO_STATUS, status);
		msm_host->err_work_state |= DSI_ERR_STATE_FIFO;
		if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW)
			msm_host->err_work_state |=
				DSI_ERR_STATE_MDP_FIFO_UNDERFLOW;
	}
}

/* Check STATUS0 for interleave-operation contention and record it. */
static void dsi_status(struct msm_dsi_host *msm_host)
{
	u32 status;

	status = dsi_read(msm_host, REG_DSI_STATUS0);

	if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) {
		dsi_write(msm_host, REG_DSI_STATUS0, status);
		msm_host->err_work_state |=
			DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION;
	}
}

1295 static void dsi_clk_status(struct msm_dsi_host *msm_host) 1296 { 1297 u32 status; 1298 1299 status = dsi_read(msm_host, REG_DSI_CLK_STATUS); 1300 1301 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) { 1302 dsi_write(msm_host, REG_DSI_CLK_STATUS, status); 1303 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED; 1304 } 1305 } 1306 1307 static void dsi_error(struct msm_dsi_host *msm_host) 1308 { 1309 /* disable dsi error interrupt */ 1310 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0); 1311 1312 dsi_clk_status(msm_host); 1313 dsi_fifo_status(msm_host); 1314 dsi_ack_err_status(msm_host); 1315 dsi_timeout_status(msm_host); 1316 dsi_status(msm_host); 1317 dsi_dln0_phy_err(msm_host); 1318 1319 queue_work(msm_host->workqueue, &msm_host->err_work); 1320 } 1321 1322 static irqreturn_t dsi_host_irq(int irq, void *ptr) 1323 { 1324 struct msm_dsi_host *msm_host = ptr; 1325 u32 isr; 1326 unsigned long flags; 1327 1328 if (!msm_host->ctrl_base) 1329 return IRQ_HANDLED; 1330 1331 spin_lock_irqsave(&msm_host->intr_lock, flags); 1332 isr = dsi_read(msm_host, REG_DSI_INTR_CTRL); 1333 dsi_write(msm_host, REG_DSI_INTR_CTRL, isr); 1334 spin_unlock_irqrestore(&msm_host->intr_lock, flags); 1335 1336 DBG("isr=0x%x, id=%d", isr, msm_host->id); 1337 1338 if (isr & DSI_IRQ_ERROR) 1339 dsi_error(msm_host); 1340 1341 if (isr & DSI_IRQ_VIDEO_DONE) 1342 complete(&msm_host->video_comp); 1343 1344 if (isr & DSI_IRQ_CMD_DMA_DONE) 1345 complete(&msm_host->dma_comp); 1346 1347 return IRQ_HANDLED; 1348 } 1349 1350 static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host, 1351 struct device *panel_device) 1352 { 1353 msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device, 1354 "disp-enable", 1355 GPIOD_OUT_LOW); 1356 if (IS_ERR(msm_host->disp_en_gpio)) { 1357 DBG("cannot get disp-enable-gpios %ld", 1358 PTR_ERR(msm_host->disp_en_gpio)); 1359 return PTR_ERR(msm_host->disp_en_gpio); 1360 } 1361 1362 msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN); 1363 if 
(IS_ERR(msm_host->te_gpio)) { 1364 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); 1365 return PTR_ERR(msm_host->te_gpio); 1366 } 1367 1368 return 0; 1369 } 1370 1371 static int dsi_host_attach(struct mipi_dsi_host *host, 1372 struct mipi_dsi_device *dsi) 1373 { 1374 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1375 int ret; 1376 1377 msm_host->channel = dsi->channel; 1378 msm_host->lanes = dsi->lanes; 1379 msm_host->format = dsi->format; 1380 msm_host->mode_flags = dsi->mode_flags; 1381 1382 msm_host->panel_node = dsi->dev.of_node; 1383 1384 /* Some gpios defined in panel DT need to be controlled by host */ 1385 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); 1386 if (ret) 1387 return ret; 1388 1389 DBG("id=%d", msm_host->id); 1390 if (msm_host->dev) 1391 drm_helper_hpd_irq_event(msm_host->dev); 1392 1393 return 0; 1394 } 1395 1396 static int dsi_host_detach(struct mipi_dsi_host *host, 1397 struct mipi_dsi_device *dsi) 1398 { 1399 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1400 1401 msm_host->panel_node = NULL; 1402 1403 DBG("id=%d", msm_host->id); 1404 if (msm_host->dev) 1405 drm_helper_hpd_irq_event(msm_host->dev); 1406 1407 return 0; 1408 } 1409 1410 static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, 1411 const struct mipi_dsi_msg *msg) 1412 { 1413 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1414 int ret; 1415 1416 if (!msg || !msm_host->power_on) 1417 return -EINVAL; 1418 1419 mutex_lock(&msm_host->cmd_mutex); 1420 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg); 1421 mutex_unlock(&msm_host->cmd_mutex); 1422 1423 return ret; 1424 } 1425 1426 static struct mipi_dsi_host_ops dsi_host_ops = { 1427 .attach = dsi_host_attach, 1428 .detach = dsi_host_detach, 1429 .transfer = dsi_host_transfer, 1430 }; 1431 1432 int msm_dsi_host_init(struct msm_dsi *msm_dsi) 1433 { 1434 struct msm_dsi_host *msm_host = NULL; 1435 struct platform_device *pdev = msm_dsi->pdev; 1436 int ret; 1437 1438 msm_host = 
devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); 1439 if (!msm_host) { 1440 pr_err("%s: FAILED: cannot alloc dsi host\n", 1441 __func__); 1442 ret = -ENOMEM; 1443 goto fail; 1444 } 1445 1446 ret = of_property_read_u32(pdev->dev.of_node, 1447 "qcom,dsi-host-index", &msm_host->id); 1448 if (ret) { 1449 dev_err(&pdev->dev, 1450 "%s: host index not specified, ret=%d\n", 1451 __func__, ret); 1452 goto fail; 1453 } 1454 msm_host->pdev = pdev; 1455 1456 ret = dsi_clk_init(msm_host); 1457 if (ret) { 1458 pr_err("%s: unable to initialize dsi clks\n", __func__); 1459 goto fail; 1460 } 1461 1462 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL"); 1463 if (IS_ERR(msm_host->ctrl_base)) { 1464 pr_err("%s: unable to map Dsi ctrl base\n", __func__); 1465 ret = PTR_ERR(msm_host->ctrl_base); 1466 goto fail; 1467 } 1468 1469 msm_host->cfg = dsi_get_config(msm_host); 1470 if (!msm_host->cfg) { 1471 ret = -EINVAL; 1472 pr_err("%s: get config failed\n", __func__); 1473 goto fail; 1474 } 1475 1476 ret = dsi_regulator_init(msm_host); 1477 if (ret) { 1478 pr_err("%s: regulator init failed\n", __func__); 1479 goto fail; 1480 } 1481 1482 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); 1483 if (!msm_host->rx_buf) { 1484 pr_err("%s: alloc rx temp buf failed\n", __func__); 1485 goto fail; 1486 } 1487 1488 init_completion(&msm_host->dma_comp); 1489 init_completion(&msm_host->video_comp); 1490 mutex_init(&msm_host->dev_mutex); 1491 mutex_init(&msm_host->cmd_mutex); 1492 mutex_init(&msm_host->clk_mutex); 1493 spin_lock_init(&msm_host->intr_lock); 1494 1495 /* setup workqueue */ 1496 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); 1497 INIT_WORK(&msm_host->err_work, dsi_err_worker); 1498 1499 msm_dsi->host = &msm_host->base; 1500 msm_dsi->id = msm_host->id; 1501 1502 DBG("Dsi Host %d initialized", msm_host->id); 1503 return 0; 1504 1505 fail: 1506 return ret; 1507 } 1508 1509 void msm_dsi_host_destroy(struct mipi_dsi_host *host) 1510 { 1511 
struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1512 1513 DBG(""); 1514 dsi_tx_buf_free(msm_host); 1515 if (msm_host->workqueue) { 1516 flush_workqueue(msm_host->workqueue); 1517 destroy_workqueue(msm_host->workqueue); 1518 msm_host->workqueue = NULL; 1519 } 1520 1521 mutex_destroy(&msm_host->clk_mutex); 1522 mutex_destroy(&msm_host->cmd_mutex); 1523 mutex_destroy(&msm_host->dev_mutex); 1524 } 1525 1526 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, 1527 struct drm_device *dev) 1528 { 1529 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1530 struct platform_device *pdev = msm_host->pdev; 1531 int ret; 1532 1533 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 1534 if (msm_host->irq < 0) { 1535 ret = msm_host->irq; 1536 dev_err(dev->dev, "failed to get irq: %d\n", ret); 1537 return ret; 1538 } 1539 1540 ret = devm_request_irq(&pdev->dev, msm_host->irq, 1541 dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 1542 "dsi_isr", msm_host); 1543 if (ret < 0) { 1544 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", 1545 msm_host->irq, ret); 1546 return ret; 1547 } 1548 1549 msm_host->dev = dev; 1550 ret = dsi_tx_buf_alloc(msm_host, SZ_4K); 1551 if (ret) { 1552 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret); 1553 return ret; 1554 } 1555 1556 return 0; 1557 } 1558 1559 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) 1560 { 1561 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1562 struct device_node *node; 1563 int ret; 1564 1565 /* Register mipi dsi host */ 1566 if (!msm_host->registered) { 1567 host->dev = &msm_host->pdev->dev; 1568 host->ops = &dsi_host_ops; 1569 ret = mipi_dsi_host_register(host); 1570 if (ret) 1571 return ret; 1572 1573 msm_host->registered = true; 1574 1575 /* If the panel driver has not been probed after host register, 1576 * we should defer the host's probe. 
1577 * It makes sure panel is connected when fbcon detects 1578 * connector status and gets the proper display mode to 1579 * create framebuffer. 1580 */ 1581 if (check_defer) { 1582 node = of_get_child_by_name(msm_host->pdev->dev.of_node, 1583 "panel"); 1584 if (node) { 1585 if (!of_drm_find_panel(node)) 1586 return -EPROBE_DEFER; 1587 } 1588 } 1589 } 1590 1591 return 0; 1592 } 1593 1594 void msm_dsi_host_unregister(struct mipi_dsi_host *host) 1595 { 1596 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1597 1598 if (msm_host->registered) { 1599 mipi_dsi_host_unregister(host); 1600 host->dev = NULL; 1601 host->ops = NULL; 1602 msm_host->registered = false; 1603 } 1604 } 1605 1606 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, 1607 const struct mipi_dsi_msg *msg) 1608 { 1609 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1610 1611 /* TODO: make sure dsi_cmd_mdp is idle. 1612 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME 1613 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed. 1614 * How to handle the old versions? Wait for mdp cmd done? 
1615 */ 1616 1617 /* 1618 * mdss interrupt is generated in mdp core clock domain 1619 * mdp clock need to be enabled to receive dsi interrupt 1620 */ 1621 dsi_clk_ctrl(msm_host, 1); 1622 1623 /* TODO: vote for bus bandwidth */ 1624 1625 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) 1626 dsi_set_tx_power_mode(0, msm_host); 1627 1628 msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL); 1629 dsi_write(msm_host, REG_DSI_CTRL, 1630 msm_host->dma_cmd_ctrl_restore | 1631 DSI_CTRL_CMD_MODE_EN | 1632 DSI_CTRL_ENABLE); 1633 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1); 1634 1635 return 0; 1636 } 1637 1638 void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, 1639 const struct mipi_dsi_msg *msg) 1640 { 1641 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1642 1643 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0); 1644 dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore); 1645 1646 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) 1647 dsi_set_tx_power_mode(1, msm_host); 1648 1649 /* TODO: unvote for bus bandwidth */ 1650 1651 dsi_clk_ctrl(msm_host, 0); 1652 } 1653 1654 int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, 1655 const struct mipi_dsi_msg *msg) 1656 { 1657 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1658 1659 return dsi_cmds2buf_tx(msm_host, msg); 1660 } 1661 1662 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, 1663 const struct mipi_dsi_msg *msg) 1664 { 1665 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1666 int data_byte, rx_byte, dlen, end; 1667 int short_response, diff, pkt_size, ret = 0; 1668 char cmd; 1669 int rlen = msg->rx_len; 1670 u8 *buf; 1671 1672 if (rlen <= 2) { 1673 short_response = 1; 1674 pkt_size = rlen; 1675 rx_byte = 4; 1676 } else { 1677 short_response = 0; 1678 data_byte = 10; /* first read */ 1679 if (rlen < data_byte) 1680 pkt_size = rlen; 1681 else 1682 pkt_size = data_byte; 1683 rx_byte = data_byte + 6; /* 4 header + 2 crc */ 1684 } 1685 1686 buf = msm_host->rx_buf; 1687 end 
= 0; 1688 while (!end) { 1689 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8}; 1690 struct mipi_dsi_msg max_pkt_size_msg = { 1691 .channel = msg->channel, 1692 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, 1693 .tx_len = 2, 1694 .tx_buf = tx, 1695 }; 1696 1697 DBG("rlen=%d pkt_size=%d rx_byte=%d", 1698 rlen, pkt_size, rx_byte); 1699 1700 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg); 1701 if (ret < 2) { 1702 pr_err("%s: Set max pkt size failed, %d\n", 1703 __func__, ret); 1704 return -EINVAL; 1705 } 1706 1707 if ((msm_host->cfg->major == MSM_DSI_VER_MAJOR_6G) && 1708 (msm_host->cfg->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { 1709 /* Clear the RDBK_DATA registers */ 1710 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 1711 DSI_RDBK_DATA_CTRL_CLR); 1712 wmb(); /* make sure the RDBK registers are cleared */ 1713 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0); 1714 wmb(); /* release cleared status before transfer */ 1715 } 1716 1717 ret = dsi_cmds2buf_tx(msm_host, msg); 1718 if (ret < msg->tx_len) { 1719 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret); 1720 return ret; 1721 } 1722 1723 /* 1724 * once cmd_dma_done interrupt received, 1725 * return data from client is ready and stored 1726 * at RDBK_DATA register already 1727 * since rx fifo is 16 bytes, dcs header is kept at first loop, 1728 * after that dcs header lost during shift into registers 1729 */ 1730 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size); 1731 1732 if (dlen <= 0) 1733 return 0; 1734 1735 if (short_response) 1736 break; 1737 1738 if (rlen <= data_byte) { 1739 diff = data_byte - rlen; 1740 end = 1; 1741 } else { 1742 diff = 0; 1743 rlen -= data_byte; 1744 } 1745 1746 if (!end) { 1747 dlen -= 2; /* 2 crc */ 1748 dlen -= diff; 1749 buf += dlen; /* next start position */ 1750 data_byte = 14; /* NOT first read */ 1751 if (rlen < data_byte) 1752 pkt_size += rlen; 1753 else 1754 pkt_size += data_byte; 1755 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff); 1756 } 1757 } 1758 1759 /* 1760 * For single 
Long read, if the requested rlen < 10, 1761 * we need to shift the start position of rx 1762 * data buffer to skip the bytes which are not 1763 * updated. 1764 */ 1765 if (pkt_size < 10 && !short_response) 1766 buf = msm_host->rx_buf + (10 - rlen); 1767 else 1768 buf = msm_host->rx_buf; 1769 1770 cmd = buf[0]; 1771 switch (cmd) { 1772 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: 1773 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__); 1774 ret = 0; 1775 break; 1776 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: 1777 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: 1778 ret = dsi_short_read1_resp(buf, msg); 1779 break; 1780 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: 1781 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: 1782 ret = dsi_short_read2_resp(buf, msg); 1783 break; 1784 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE: 1785 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE: 1786 ret = dsi_long_read_resp(buf, msg); 1787 break; 1788 default: 1789 pr_warn("%s:Invalid response cmd\n", __func__); 1790 ret = 0; 1791 } 1792 1793 return ret; 1794 } 1795 1796 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len) 1797 { 1798 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1799 1800 dsi_write(msm_host, REG_DSI_DMA_BASE, iova); 1801 dsi_write(msm_host, REG_DSI_DMA_LEN, len); 1802 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1); 1803 1804 /* Make sure trigger happens */ 1805 wmb(); 1806 } 1807 1808 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, 1809 struct msm_dsi_pll *src_pll) 1810 { 1811 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1812 struct clk *byte_clk_provider, *pixel_clk_provider; 1813 int ret; 1814 1815 ret = msm_dsi_pll_get_clk_provider(src_pll, 1816 &byte_clk_provider, &pixel_clk_provider); 1817 if (ret) { 1818 pr_info("%s: can't get provider from pll, don't set parent\n", 1819 __func__); 1820 return 0; 1821 } 1822 1823 ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider); 1824 if (ret) { 1825 pr_err("%s: 
can't set parent to byte_clk_src. ret=%d\n", 1826 __func__, ret); 1827 goto exit; 1828 } 1829 1830 ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider); 1831 if (ret) { 1832 pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n", 1833 __func__, ret); 1834 goto exit; 1835 } 1836 1837 exit: 1838 return ret; 1839 } 1840 1841 int msm_dsi_host_enable(struct mipi_dsi_host *host) 1842 { 1843 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1844 1845 dsi_op_mode_config(msm_host, 1846 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true); 1847 1848 /* TODO: clock should be turned off for command mode, 1849 * and only turned on before MDP START. 1850 * This part of code should be enabled once mdp driver support it. 1851 */ 1852 /* if (msm_panel->mode == MSM_DSI_CMD_MODE) 1853 dsi_clk_ctrl(msm_host, 0); */ 1854 1855 return 0; 1856 } 1857 1858 int msm_dsi_host_disable(struct mipi_dsi_host *host) 1859 { 1860 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1861 1862 dsi_op_mode_config(msm_host, 1863 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 1864 1865 /* Since we have disabled INTF, the video engine won't stop so that 1866 * the cmd engine will be blocked. 1867 * Reset to disable video engine so that we can send off cmd. 
1868 */ 1869 dsi_sw_reset(msm_host); 1870 1871 return 0; 1872 } 1873 1874 int msm_dsi_host_power_on(struct mipi_dsi_host *host) 1875 { 1876 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1877 u32 clk_pre = 0, clk_post = 0; 1878 int ret = 0; 1879 1880 mutex_lock(&msm_host->dev_mutex); 1881 if (msm_host->power_on) { 1882 DBG("dsi host already on"); 1883 goto unlock_ret; 1884 } 1885 1886 ret = dsi_calc_clk_rate(msm_host); 1887 if (ret) { 1888 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 1889 goto unlock_ret; 1890 } 1891 1892 ret = dsi_host_regulator_enable(msm_host); 1893 if (ret) { 1894 pr_err("%s:Failed to enable vregs.ret=%d\n", 1895 __func__, ret); 1896 goto unlock_ret; 1897 } 1898 1899 ret = dsi_bus_clk_enable(msm_host); 1900 if (ret) { 1901 pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret); 1902 goto fail_disable_reg; 1903 } 1904 1905 dsi_phy_sw_reset(msm_host); 1906 ret = msm_dsi_manager_phy_enable(msm_host->id, 1907 msm_host->byte_clk_rate * 8, 1908 clk_get_rate(msm_host->esc_clk), 1909 &clk_pre, &clk_post); 1910 dsi_bus_clk_disable(msm_host); 1911 if (ret) { 1912 pr_err("%s: failed to enable phy, %d\n", __func__, ret); 1913 goto fail_disable_reg; 1914 } 1915 1916 ret = dsi_clk_ctrl(msm_host, 1); 1917 if (ret) { 1918 pr_err("%s: failed to enable clocks. 
ret=%d\n", __func__, ret); 1919 goto fail_disable_reg; 1920 } 1921 1922 dsi_timing_setup(msm_host); 1923 dsi_sw_reset(msm_host); 1924 dsi_ctrl_config(msm_host, true, clk_pre, clk_post); 1925 1926 if (msm_host->disp_en_gpio) 1927 gpiod_set_value(msm_host->disp_en_gpio, 1); 1928 1929 msm_host->power_on = true; 1930 mutex_unlock(&msm_host->dev_mutex); 1931 1932 return 0; 1933 1934 fail_disable_reg: 1935 dsi_host_regulator_disable(msm_host); 1936 unlock_ret: 1937 mutex_unlock(&msm_host->dev_mutex); 1938 return ret; 1939 } 1940 1941 int msm_dsi_host_power_off(struct mipi_dsi_host *host) 1942 { 1943 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1944 1945 mutex_lock(&msm_host->dev_mutex); 1946 if (!msm_host->power_on) { 1947 DBG("dsi host already off"); 1948 goto unlock_ret; 1949 } 1950 1951 dsi_ctrl_config(msm_host, false, 0, 0); 1952 1953 if (msm_host->disp_en_gpio) 1954 gpiod_set_value(msm_host->disp_en_gpio, 0); 1955 1956 msm_dsi_manager_phy_disable(msm_host->id); 1957 1958 dsi_clk_ctrl(msm_host, 0); 1959 1960 dsi_host_regulator_disable(msm_host); 1961 1962 DBG("-"); 1963 1964 msm_host->power_on = false; 1965 1966 unlock_ret: 1967 mutex_unlock(&msm_host->dev_mutex); 1968 return 0; 1969 } 1970 1971 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 1972 struct drm_display_mode *mode) 1973 { 1974 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1975 1976 if (msm_host->mode) { 1977 drm_mode_destroy(msm_host->dev, msm_host->mode); 1978 msm_host->mode = NULL; 1979 } 1980 1981 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode); 1982 if (IS_ERR(msm_host->mode)) { 1983 pr_err("%s: cannot duplicate mode\n", __func__); 1984 return PTR_ERR(msm_host->mode); 1985 } 1986 1987 return 0; 1988 } 1989 1990 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 1991 unsigned long *panel_flags) 1992 { 1993 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1994 struct drm_panel *panel; 1995 1996 panel = 
of_drm_find_panel(msm_host->panel_node); 1997 if (panel_flags) 1998 *panel_flags = msm_host->mode_flags; 1999 2000 return panel; 2001 } 2002 2003