1 /* 2 * Copyright (c) 2015, The Linux Foundation. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 and 6 * only version 2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 */ 13 14 #include <linux/clk.h> 15 #include <linux/delay.h> 16 #include <linux/err.h> 17 #include <linux/gpio.h> 18 #include <linux/gpio/consumer.h> 19 #include <linux/interrupt.h> 20 #include <linux/of_device.h> 21 #include <linux/of_gpio.h> 22 #include <linux/of_irq.h> 23 #include <linux/pinctrl/consumer.h> 24 #include <linux/of_graph.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/spinlock.h> 27 #include <linux/mfd/syscon.h> 28 #include <linux/regmap.h> 29 #include <video/mipi_display.h> 30 31 #include "dsi.h" 32 #include "dsi.xml.h" 33 #include "sfpb.xml.h" 34 #include "dsi_cfg.h" 35 #include "msm_kms.h" 36 37 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) 38 { 39 u32 ver; 40 41 if (!major || !minor) 42 return -EINVAL; 43 44 /* 45 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0 46 * makes all other registers 4-byte shifted down. 47 * 48 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and 49 * older, we read the DSI_VERSION register without any shift(offset 50 * 0x1f0). In the case of DSIv2, this hast to be a non-zero value. In 51 * the case of DSI6G, this has to be zero (the offset points to a 52 * scratch register which we never touch) 53 */ 54 55 ver = msm_readl(base + REG_DSI_VERSION); 56 if (ver) { 57 /* older dsi host, there is no register shift */ 58 ver = FIELD(ver, DSI_VERSION_MAJOR); 59 if (ver <= MSM_DSI_VER_MAJOR_V2) { 60 /* old versions */ 61 *major = ver; 62 *minor = 0; 63 return 0; 64 } else { 65 return -EINVAL; 66 } 67 } else { 68 /* 69 * newer host, offset 0 has 6G_HW_VERSION, the rest of the 70 * registers are shifted down, read DSI_VERSION again with 71 * the shifted offset 72 */ 73 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); 74 ver = FIELD(ver, DSI_VERSION_MAJOR); 75 if (ver == MSM_DSI_VER_MAJOR_6G) { 76 /* 6G version */ 77 *major = ver; 78 *minor = msm_readl(base + REG_DSI_6G_HW_VERSION); 79 return 0; 80 } else { 81 return -EINVAL; 82 } 83 } 84 } 85 86 #define DSI_ERR_STATE_ACK 0x0000 87 #define DSI_ERR_STATE_TIMEOUT 0x0001 88 #define DSI_ERR_STATE_DLN0_PHY 0x0002 89 #define DSI_ERR_STATE_FIFO 0x0004 90 #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008 91 #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010 92 #define DSI_ERR_STATE_PLL_UNLOCKED 0x0020 93 94 #define DSI_CLK_CTRL_ENABLE_CLKS \ 95 (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \ 96 DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \ 97 DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \ 98 DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK) 99 100 struct msm_dsi_host { 101 struct mipi_dsi_host base; 102 103 struct platform_device *pdev; 104 struct drm_device *dev; 105 106 int id; 107 108 void __iomem *ctrl_base; 109 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; 110 111 struct clk *bus_clks[DSI_BUS_CLK_MAX]; 112 113 struct clk *byte_clk; 114 struct clk *esc_clk; 115 struct clk *pixel_clk; 116 struct clk *byte_clk_src; 117 struct clk *pixel_clk_src; 118 struct clk *byte_intf_clk; 119 120 u32 byte_clk_rate; 121 u32 esc_clk_rate; 122 123 /* DSI v2 specific clocks */ 124 struct clk *src_clk; 125 struct clk *esc_clk_src; 126 struct clk *dsi_clk_src; 127 128 u32 src_clk_rate; 129 130 struct gpio_desc *disp_en_gpio; 131 struct gpio_desc *te_gpio; 132 133 const struct msm_dsi_cfg_handler *cfg_hnd; 134 135 struct completion dma_comp; 136 struct completion video_comp; 137 struct mutex dev_mutex; 138 struct mutex cmd_mutex; 139 spinlock_t intr_lock; /* Protect interrupt ctrl register */ 140 141 u32 err_work_state; 142 struct work_struct err_work; 143 struct work_struct hpd_work; 144 struct workqueue_struct *workqueue; 145 146 /* DSI 6G TX buffer*/ 147 struct drm_gem_object *tx_gem_obj; 148 149 /* DSI v2 TX buffer */ 150 void *tx_buf; 151 dma_addr_t tx_buf_paddr; 152 153 int tx_size; 154 155 u8 *rx_buf; 156 157 struct regmap *sfpb; 158 159 struct drm_display_mode *mode; 160 161 /* connected device info */ 162 struct device_node *device_node; 163 unsigned int channel; 164 unsigned int lanes; 165 enum mipi_dsi_pixel_format format; 166 unsigned long mode_flags; 167 168 /* lane data parsed via DT */ 169 int dlane_swap; 170 int num_data_lanes; 171 172 u32 dma_cmd_ctrl_restore; 173 174 bool registered; 175 bool power_on; 176 bool enabled; 177 int irq; 178 }; 179 180 static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt) 181 { 182 switch (fmt) { 183 case MIPI_DSI_FMT_RGB565: return 16; 184 case MIPI_DSI_FMT_RGB666_PACKED: return 18; 185 case MIPI_DSI_FMT_RGB666: 186 case MIPI_DSI_FMT_RGB888: 187 default: return 24; 188 } 189 } 190 191 static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) 192 { 193 return msm_readl(msm_host->ctrl_base + reg); 194 } 195 static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) 196 { 197 msm_writel(data, msm_host->ctrl_base + reg); 198 } 199 200 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); 201 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); 202 203 static const struct msm_dsi_cfg_handler *dsi_get_config( 204 struct msm_dsi_host *msm_host) 205 { 206 const struct msm_dsi_cfg_handler *cfg_hnd = NULL; 207 struct device *dev = &msm_host->pdev->dev; 208 struct regulator *gdsc_reg; 209 struct clk *ahb_clk; 210 int ret; 211 u32 major = 0, minor = 0; 212 213 gdsc_reg = regulator_get(dev, "gdsc"); 214 if (IS_ERR(gdsc_reg)) { 215 pr_err("%s: cannot get gdsc\n", __func__); 216 goto exit; 217 } 218 219 ahb_clk = msm_clk_get(msm_host->pdev, "iface"); 220 if (IS_ERR(ahb_clk)) { 221 pr_err("%s: cannot get interface clock\n", __func__); 222 goto put_gdsc; 223 } 224 225 pm_runtime_get_sync(dev); 226 227 ret = regulator_enable(gdsc_reg); 228 if (ret) { 229 pr_err("%s: unable to enable gdsc\n", __func__); 230 goto put_gdsc; 231 } 232 233 ret = clk_prepare_enable(ahb_clk); 234 if (ret) { 235 pr_err("%s: unable to enable ahb_clk\n", __func__); 236 goto disable_gdsc; 237 } 238 239 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); 240 if (ret) { 241 pr_err("%s: Invalid version\n", __func__); 242 goto disable_clks; 243 } 244 245 cfg_hnd = msm_dsi_cfg_get(major, minor); 246 247 DBG("%s: Version %x:%x\n", __func__, major, minor); 248 249 disable_clks: 250 clk_disable_unprepare(ahb_clk); 251 disable_gdsc: 252 regulator_disable(gdsc_reg); 253 pm_runtime_put_sync(dev); 254 put_gdsc: 255 regulator_put(gdsc_reg); 256 exit: 257 return cfg_hnd; 258 } 259 260 static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) 261 { 262 return container_of(host, struct msm_dsi_host, base); 263 } 264 265 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) 266 { 267 struct regulator_bulk_data *s = msm_host->supplies; 268 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; 269 int num = msm_host->cfg_hnd->cfg->reg_cfg.num; 270 int i; 271 272 DBG(""); 273 for (i = num - 1; i >= 0; i--) 274 if (regs[i].disable_load >= 0) 275 regulator_set_load(s[i].consumer, 276 regs[i].disable_load); 277 278 regulator_bulk_disable(num, s); 279 } 280 281 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) 282 { 283 struct regulator_bulk_data *s = msm_host->supplies; 284 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; 285 int num = msm_host->cfg_hnd->cfg->reg_cfg.num; 286 int ret, i; 287 288 DBG(""); 289 for (i = 0; i < num; i++) { 290 if (regs[i].enable_load >= 0) { 291 ret = regulator_set_load(s[i].consumer, 292 regs[i].enable_load); 293 if (ret < 0) { 294 pr_err("regulator %d set op mode failed, %d\n", 295 i, ret); 296 goto fail; 297 } 298 } 299 } 300 301 ret = regulator_bulk_enable(num, s); 302 if (ret < 0) { 303 pr_err("regulator enable failed, %d\n", ret); 304 goto fail; 305 } 306 307 return 0; 308 309 fail: 310 for (i--; i >= 0; i--) 311 regulator_set_load(s[i].consumer, regs[i].disable_load); 312 return ret; 313 } 314 315 static int dsi_regulator_init(struct msm_dsi_host *msm_host) 316 { 317 struct regulator_bulk_data *s = msm_host->supplies; 318 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; 319 int num = msm_host->cfg_hnd->cfg->reg_cfg.num; 320 int i, ret; 321 322 for (i = 0; i < num; i++) 323 s[i].supply = regs[i].name; 324 325 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s); 326 if (ret < 0) { 327 pr_err("%s: failed to init regulator, ret=%d\n", 328 __func__, ret); 329 return ret; 330 } 331 332 return 0; 333 } 334 335 static int dsi_clk_init(struct msm_dsi_host *msm_host) 336 { 337 struct platform_device *pdev = msm_host->pdev; 338 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 339 const struct msm_dsi_config *cfg = cfg_hnd->cfg; 340 int i, ret = 0; 341 342 /* get bus clocks */ 343 for (i = 0; i < cfg->num_bus_clks; i++) { 344 msm_host->bus_clks[i] = msm_clk_get(pdev, 345 cfg->bus_clk_names[i]); 346 if (IS_ERR(msm_host->bus_clks[i])) { 347 ret = PTR_ERR(msm_host->bus_clks[i]); 348 pr_err("%s: Unable to get %s clock, ret = %d\n", 349 __func__, cfg->bus_clk_names[i], ret); 350 goto exit; 351 } 352 } 353 354 /* get link and source clocks */ 355 msm_host->byte_clk = msm_clk_get(pdev, "byte"); 356 if (IS_ERR(msm_host->byte_clk)) { 357 ret = PTR_ERR(msm_host->byte_clk); 358 pr_err("%s: can't find dsi_byte clock. ret=%d\n", 359 __func__, ret); 360 msm_host->byte_clk = NULL; 361 goto exit; 362 } 363 364 msm_host->pixel_clk = msm_clk_get(pdev, "pixel"); 365 if (IS_ERR(msm_host->pixel_clk)) { 366 ret = PTR_ERR(msm_host->pixel_clk); 367 pr_err("%s: can't find dsi_pixel clock. ret=%d\n", 368 __func__, ret); 369 msm_host->pixel_clk = NULL; 370 goto exit; 371 } 372 373 msm_host->esc_clk = msm_clk_get(pdev, "core"); 374 if (IS_ERR(msm_host->esc_clk)) { 375 ret = PTR_ERR(msm_host->esc_clk); 376 pr_err("%s: can't find dsi_esc clock. ret=%d\n", 377 __func__, ret); 378 msm_host->esc_clk = NULL; 379 goto exit; 380 } 381 382 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G && 383 cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) { 384 msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); 385 if (IS_ERR(msm_host->byte_intf_clk)) { 386 ret = PTR_ERR(msm_host->byte_intf_clk); 387 pr_err("%s: can't find byte_intf clock. ret=%d\n", 388 __func__, ret); 389 goto exit; 390 } 391 } else { 392 msm_host->byte_intf_clk = NULL; 393 } 394 395 msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk); 396 if (!msm_host->byte_clk_src) { 397 ret = -ENODEV; 398 pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret); 399 goto exit; 400 } 401 402 msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk); 403 if (!msm_host->pixel_clk_src) { 404 ret = -ENODEV; 405 pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret); 406 goto exit; 407 } 408 409 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) { 410 msm_host->src_clk = msm_clk_get(pdev, "src"); 411 if (IS_ERR(msm_host->src_clk)) { 412 ret = PTR_ERR(msm_host->src_clk); 413 pr_err("%s: can't find src clock. ret=%d\n", 414 __func__, ret); 415 msm_host->src_clk = NULL; 416 goto exit; 417 } 418 419 msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk); 420 if (!msm_host->esc_clk_src) { 421 ret = -ENODEV; 422 pr_err("%s: can't get esc clock parent. ret=%d\n", 423 __func__, ret); 424 goto exit; 425 } 426 427 msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk); 428 if (!msm_host->dsi_clk_src) { 429 ret = -ENODEV; 430 pr_err("%s: can't get src clock parent. ret=%d\n", 431 __func__, ret); 432 } 433 } 434 exit: 435 return ret; 436 } 437 438 static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host) 439 { 440 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; 441 int i, ret; 442 443 DBG("id=%d", msm_host->id); 444 445 for (i = 0; i < cfg->num_bus_clks; i++) { 446 ret = clk_prepare_enable(msm_host->bus_clks[i]); 447 if (ret) { 448 pr_err("%s: failed to enable bus clock %d ret %d\n", 449 __func__, i, ret); 450 goto err; 451 } 452 } 453 454 return 0; 455 err: 456 for (; i > 0; i--) 457 clk_disable_unprepare(msm_host->bus_clks[i]); 458 459 return ret; 460 } 461 462 static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host) 463 { 464 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; 465 int i; 466 467 DBG(""); 468 469 for (i = cfg->num_bus_clks - 1; i >= 0; i--) 470 clk_disable_unprepare(msm_host->bus_clks[i]); 471 } 472 473 int msm_dsi_runtime_suspend(struct device *dev) 474 { 475 struct platform_device *pdev = to_platform_device(dev); 476 struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); 477 struct mipi_dsi_host *host = msm_dsi->host; 478 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 479 480 if (!msm_host->cfg_hnd) 481 return 0; 482 483 dsi_bus_clk_disable(msm_host); 484 485 return 0; 486 } 487 488 int msm_dsi_runtime_resume(struct device *dev) 489 { 490 struct platform_device *pdev = to_platform_device(dev); 491 struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); 492 struct mipi_dsi_host *host = msm_dsi->host; 493 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 494 495 if (!msm_host->cfg_hnd) 496 return 0; 497 498 return dsi_bus_clk_enable(msm_host); 499 } 500 501 static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) 502 { 503 int ret; 504 505 DBG("Set clk rates: pclk=%d, byteclk=%d", 506 msm_host->mode->clock, msm_host->byte_clk_rate); 507 508 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); 509 if (ret) { 510 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); 511 goto error; 512 } 513 514 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000); 515 if (ret) { 516 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); 517 goto error; 518 } 519 520 if (msm_host->byte_intf_clk) { 521 ret = clk_set_rate(msm_host->byte_intf_clk, 522 msm_host->byte_clk_rate / 2); 523 if (ret) { 524 pr_err("%s: Failed to set rate byte intf clk, %d\n", 525 __func__, ret); 526 goto error; 527 } 528 } 529 530 ret = clk_prepare_enable(msm_host->esc_clk); 531 if (ret) { 532 pr_err("%s: Failed to enable dsi esc clk\n", __func__); 533 goto error; 534 } 535 536 ret = clk_prepare_enable(msm_host->byte_clk); 537 if (ret) { 538 pr_err("%s: Failed to enable dsi byte clk\n", __func__); 539 goto byte_clk_err; 540 } 541 542 ret = clk_prepare_enable(msm_host->pixel_clk); 543 if (ret) { 544 pr_err("%s: Failed to enable dsi pixel clk\n", __func__); 545 goto pixel_clk_err; 546 } 547 548 if (msm_host->byte_intf_clk) { 549 ret = clk_prepare_enable(msm_host->byte_intf_clk); 550 if (ret) { 551 pr_err("%s: Failed to enable byte intf clk\n", 552 __func__); 553 goto byte_intf_clk_err; 554 } 555 } 556 557 return 0; 558 559 byte_intf_clk_err: 560 clk_disable_unprepare(msm_host->pixel_clk); 561 pixel_clk_err: 562 clk_disable_unprepare(msm_host->byte_clk); 563 byte_clk_err: 564 clk_disable_unprepare(msm_host->esc_clk); 565 error: 566 return ret; 567 } 568 569 static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) 570 { 571 int ret; 572 573 DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d", 574 msm_host->mode->clock, msm_host->byte_clk_rate, 575 msm_host->esc_clk_rate, msm_host->src_clk_rate); 576 577 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); 578 if (ret) { 579 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); 580 goto error; 581 } 582 583 ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate); 584 if (ret) { 585 pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret); 586 goto error; 587 } 588 589 ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate); 590 if (ret) { 591 pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret); 592 goto error; 593 } 594 595 ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000); 596 if (ret) { 597 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); 598 goto error; 599 } 600 601 ret = clk_prepare_enable(msm_host->byte_clk); 602 if (ret) { 603 pr_err("%s: Failed to enable dsi byte clk\n", __func__); 604 goto error; 605 } 606 607 ret = clk_prepare_enable(msm_host->esc_clk); 608 if (ret) { 609 pr_err("%s: Failed to enable dsi esc clk\n", __func__); 610 goto esc_clk_err; 611 } 612 613 ret = clk_prepare_enable(msm_host->src_clk); 614 if (ret) { 615 pr_err("%s: Failed to enable dsi src clk\n", __func__); 616 goto src_clk_err; 617 } 618 619 ret = clk_prepare_enable(msm_host->pixel_clk); 620 if (ret) { 621 pr_err("%s: Failed to enable dsi pixel clk\n", __func__); 622 goto pixel_clk_err; 623 } 624 625 return 0; 626 627 pixel_clk_err: 628 clk_disable_unprepare(msm_host->src_clk); 629 src_clk_err: 630 clk_disable_unprepare(msm_host->esc_clk); 631 esc_clk_err: 632 clk_disable_unprepare(msm_host->byte_clk); 633 error: 634 return ret; 635 } 636 637 static int dsi_link_clk_enable(struct msm_dsi_host *msm_host) 638 { 639 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 640 641 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) 642 return dsi_link_clk_enable_6g(msm_host); 643 else 644 return dsi_link_clk_enable_v2(msm_host); 645 } 646 647 static void dsi_link_clk_disable(struct msm_dsi_host *msm_host) 648 { 649 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 650 651 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { 652 clk_disable_unprepare(msm_host->esc_clk); 653 clk_disable_unprepare(msm_host->pixel_clk); 654 if (msm_host->byte_intf_clk) 655 clk_disable_unprepare(msm_host->byte_intf_clk); 656 clk_disable_unprepare(msm_host->byte_clk); 657 } else { 658 clk_disable_unprepare(msm_host->pixel_clk); 659 clk_disable_unprepare(msm_host->src_clk); 660 clk_disable_unprepare(msm_host->esc_clk); 661 clk_disable_unprepare(msm_host->byte_clk); 662 } 663 } 664 665 static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) 666 { 667 struct drm_display_mode *mode = msm_host->mode; 668 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 669 u8 lanes = msm_host->lanes; 670 u32 bpp = dsi_get_bpp(msm_host->format); 671 u32 pclk_rate; 672 673 if (!mode) { 674 pr_err("%s: mode not set\n", __func__); 675 return -EINVAL; 676 } 677 678 pclk_rate = mode->clock * 1000; 679 if (lanes > 0) { 680 msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes); 681 } else { 682 pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__); 683 msm_host->byte_clk_rate = (pclk_rate * bpp) / 8; 684 } 685 686 DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate); 687 688 msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk); 689 690 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) { 691 unsigned int esc_mhz, esc_div; 692 unsigned long byte_mhz; 693 694 msm_host->src_clk_rate = (pclk_rate * bpp) / 8; 695 696 /* 697 * esc clock is byte clock followed by a 4 bit divider, 698 * we need to find an escape clock frequency within the 699 * mipi DSI spec range within the maximum divider limit 700 * We iterate here between an escape clock frequencey 701 * between 20 Mhz to 5 Mhz and pick up the first one 702 * that can be supported by our divider 703 */ 704 705 byte_mhz = msm_host->byte_clk_rate / 1000000; 706 707 for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) { 708 esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz); 709 710 /* 711 * TODO: Ideally, we shouldn't know what sort of divider 712 * is available in mmss_cc, we're just assuming that 713 * it'll always be a 4 bit divider. Need to come up with 714 * a better way here. 715 */ 716 if (esc_div >= 1 && esc_div <= 16) 717 break; 718 } 719 720 if (esc_mhz < 5) 721 return -EINVAL; 722 723 msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div; 724 725 DBG("esc=%d, src=%d", msm_host->esc_clk_rate, 726 msm_host->src_clk_rate); 727 } 728 729 return 0; 730 } 731 732 static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) 733 { 734 u32 intr; 735 unsigned long flags; 736 737 spin_lock_irqsave(&msm_host->intr_lock, flags); 738 intr = dsi_read(msm_host, REG_DSI_INTR_CTRL); 739 740 if (enable) 741 intr |= mask; 742 else 743 intr &= ~mask; 744 745 DBG("intr=%x enable=%d", intr, enable); 746 747 dsi_write(msm_host, REG_DSI_INTR_CTRL, intr); 748 spin_unlock_irqrestore(&msm_host->intr_lock, flags); 749 } 750 751 static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags) 752 { 753 if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) 754 return BURST_MODE; 755 else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) 756 return NON_BURST_SYNCH_PULSE; 757 758 return NON_BURST_SYNCH_EVENT; 759 } 760 761 static inline enum dsi_vid_dst_format dsi_get_vid_fmt( 762 const enum mipi_dsi_pixel_format mipi_fmt) 763 { 764 switch (mipi_fmt) { 765 case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888; 766 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE; 767 case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666; 768 case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565; 769 default: return VID_DST_FORMAT_RGB888; 770 } 771 } 772 773 static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( 774 const enum mipi_dsi_pixel_format mipi_fmt) 775 { 776 switch (mipi_fmt) { 777 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; 778 case MIPI_DSI_FMT_RGB666_PACKED: 779 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; 780 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 781 default: return CMD_DST_FORMAT_RGB888; 782 } 783 } 784 785 static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, 786 struct msm_dsi_phy_shared_timings *phy_shared_timings) 787 { 788 u32 flags = msm_host->mode_flags; 789 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; 790 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 791 u32 data = 0; 792 793 if (!enable) { 794 dsi_write(msm_host, REG_DSI_CTRL, 0); 795 return; 796 } 797 798 if (flags & MIPI_DSI_MODE_VIDEO) { 799 if (flags & MIPI_DSI_MODE_VIDEO_HSE) 800 data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE; 801 if (flags & MIPI_DSI_MODE_VIDEO_HFP) 802 data |= DSI_VID_CFG0_HFP_POWER_STOP; 803 if (flags & MIPI_DSI_MODE_VIDEO_HBP) 804 data |= DSI_VID_CFG0_HBP_POWER_STOP; 805 if (flags & MIPI_DSI_MODE_VIDEO_HSA) 806 data |= DSI_VID_CFG0_HSA_POWER_STOP; 807 /* Always set low power stop mode for BLLP 808 * to let command engine send packets 809 */ 810 data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP | 811 DSI_VID_CFG0_BLLP_POWER_STOP; 812 data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); 813 data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); 814 data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); 815 dsi_write(msm_host, REG_DSI_VID_CFG0, data); 816 817 /* Do not swap RGB colors */ 818 data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB); 819 dsi_write(msm_host, REG_DSI_VID_CFG1, 0); 820 } else { 821 /* Do not swap RGB colors */ 822 data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB); 823 data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt)); 824 dsi_write(msm_host, REG_DSI_CMD_CFG0, data); 825 826 data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) | 827 DSI_CMD_CFG1_WR_MEM_CONTINUE( 828 MIPI_DCS_WRITE_MEMORY_CONTINUE); 829 /* Always insert DCS command */ 830 data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND; 831 dsi_write(msm_host, REG_DSI_CMD_CFG1, data); 832 } 833 834 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, 835 DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER | 836 DSI_CMD_DMA_CTRL_LOW_POWER); 837 838 data = 0; 839 /* Always assume dedicated TE pin */ 840 data |= DSI_TRIG_CTRL_TE; 841 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); 842 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); 843 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); 844 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && 845 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) 846 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; 847 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); 848 849 data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) | 850 DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre); 851 dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); 852 853 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && 854 (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) && 855 phy_shared_timings->clk_pre_inc_by_2) 856 dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND, 857 DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); 858 859 data = 0; 860 if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) 861 data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; 862 dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data); 863 864 /* allow only ack-err-status to generate interrupt */ 865 dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0); 866 867 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); 868 869 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); 870 871 data = DSI_CTRL_CLK_EN; 872 873 DBG("lane number=%d", msm_host->lanes); 874 data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0); 875 876 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, 877 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap)); 878 879 if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) 880 dsi_write(msm_host, REG_DSI_LANE_CTRL, 881 DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); 882 883 data |= DSI_CTRL_ENABLE; 884 885 dsi_write(msm_host, REG_DSI_CTRL, data); 886 } 887 888 static void dsi_timing_setup(struct msm_dsi_host *msm_host) 889 { 890 struct drm_display_mode *mode = msm_host->mode; 891 u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */ 892 u32 h_total = mode->htotal; 893 u32 v_total = mode->vtotal; 894 u32 hs_end = mode->hsync_end - mode->hsync_start; 895 u32 vs_end = mode->vsync_end - mode->vsync_start; 896 u32 ha_start = h_total - mode->hsync_start; 897 u32 ha_end = ha_start + mode->hdisplay; 898 u32 va_start = v_total - mode->vsync_start; 899 u32 va_end = va_start + mode->vdisplay; 900 u32 wc; 901 902 DBG(""); 903 904 if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { 905 dsi_write(msm_host, REG_DSI_ACTIVE_H, 906 DSI_ACTIVE_H_START(ha_start) | 907 DSI_ACTIVE_H_END(ha_end)); 908 dsi_write(msm_host, REG_DSI_ACTIVE_V, 909 DSI_ACTIVE_V_START(va_start) | 910 DSI_ACTIVE_V_END(va_end)); 911 dsi_write(msm_host, REG_DSI_TOTAL, 912 DSI_TOTAL_H_TOTAL(h_total - 1) | 913 DSI_TOTAL_V_TOTAL(v_total - 1)); 914 915 dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC, 916 DSI_ACTIVE_HSYNC_START(hs_start) | 917 DSI_ACTIVE_HSYNC_END(hs_end)); 918 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0); 919 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS, 920 DSI_ACTIVE_VSYNC_VPOS_START(vs_start) | 921 DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); 922 } else { /* command mode */ 923 /* image data and 1 byte write_memory_start cmd */ 924 wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1; 925 926 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL, 927 DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) | 928 DSI_CMD_MDP_STREAM_CTRL_VIRTUAL_CHANNEL( 929 msm_host->channel) | 930 DSI_CMD_MDP_STREAM_CTRL_DATA_TYPE( 931 MIPI_DSI_DCS_LONG_WRITE)); 932 933 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL, 934 DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) | 935 DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay)); 936 } 937 } 938 939 static void dsi_sw_reset(struct msm_dsi_host *msm_host) 940 { 941 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); 942 wmb(); /* clocks need to be enabled before reset */ 943 944 dsi_write(msm_host, REG_DSI_RESET, 1); 945 wmb(); /* make sure reset happen */ 946 dsi_write(msm_host, REG_DSI_RESET, 0); 947 } 948 949 static void dsi_op_mode_config(struct msm_dsi_host *msm_host, 950 bool video_mode, bool enable) 951 { 952 u32 dsi_ctrl; 953 954 dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL); 955 956 if (!enable) { 957 dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN | 958 DSI_CTRL_CMD_MODE_EN); 959 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE | 960 DSI_IRQ_MASK_VIDEO_DONE, 0); 961 } else { 962 if (video_mode) { 963 dsi_ctrl |= DSI_CTRL_VID_MODE_EN; 964 } else { /* command mode */ 965 dsi_ctrl |= DSI_CTRL_CMD_MODE_EN; 966 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1); 967 } 968 dsi_ctrl |= DSI_CTRL_ENABLE; 969 } 970 971 dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl); 972 } 973 974 static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host) 975 { 976 u32 data; 977 978 data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL); 979 980 if (mode == 0) 981 data &= ~DSI_CMD_DMA_CTRL_LOW_POWER; 982 else 983 data |= DSI_CMD_DMA_CTRL_LOW_POWER; 984 985 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data); 986 } 987 988 static void dsi_wait4video_done(struct msm_dsi_host *msm_host) 989 { 990 u32 ret = 0; 991 struct device *dev = &msm_host->pdev->dev; 992 993 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); 994 995 reinit_completion(&msm_host->video_comp); 996 997 ret = wait_for_completion_timeout(&msm_host->video_comp, 998 msecs_to_jiffies(70)); 999 1000 if (ret <= 0) 1001 dev_err(dev, "wait for video done timed out\n"); 1002 1003 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); 1004 } 1005 1006 static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) 1007 { 1008 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1009 return; 1010 1011 if (msm_host->power_on && msm_host->enabled) { 1012 dsi_wait4video_done(msm_host); 1013 /* delay 4 ms to skip BLLP */ 1014 usleep_range(2000, 4000); 1015 } 1016 } 1017 1018 /* dsi_cmd */ 1019 static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) 1020 { 1021 struct drm_device *dev = msm_host->dev; 1022 struct msm_drm_private *priv = dev->dev_private; 1023 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1024 int ret; 1025 uint64_t iova; 1026 1027 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { 1028 msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED); 1029 if (IS_ERR(msm_host->tx_gem_obj)) { 1030 ret = PTR_ERR(msm_host->tx_gem_obj); 1031 pr_err("%s: failed to allocate gem, %d\n", 1032 __func__, ret); 1033 msm_host->tx_gem_obj = NULL; 1034 return ret; 1035 } 1036 1037 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 1038 priv->kms->aspace, &iova); 1039 if (ret) { 1040 pr_err("%s: failed to get iova, %d\n", __func__, ret); 1041 return ret; 1042 } 1043 1044 if (iova & 0x07) { 1045 pr_err("%s: buf NOT 8 bytes aligned\n", __func__); 1046 return -EINVAL; 1047 } 1048 1049 msm_host->tx_size = msm_host->tx_gem_obj->size; 1050 } else { 1051 msm_host->tx_buf = dma_alloc_coherent(dev->dev, size, 1052 &msm_host->tx_buf_paddr, GFP_KERNEL); 1053 if (!msm_host->tx_buf) { 1054 ret = -ENOMEM; 1055 pr_err("%s: failed to allocate tx buf, %d\n", 1056 __func__, ret); 1057 return ret; 1058 } 1059 1060 msm_host->tx_size = size; 1061 } 1062 1063 return 0; 1064 } 1065 1066 static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) 1067 { 1068 struct drm_device *dev = msm_host->dev; 1069 struct msm_drm_private *priv; 1070 1071 /* 1072 * This is possible if we're tearing down before we've had a chance to 1073 * fully initialize. A very real possibility if our probe is deferred, 1074 * in which case we'll hit msm_dsi_host_destroy() without having run 1075 * through the dsi_tx_buf_alloc(). 1076 */ 1077 if (!dev) 1078 return; 1079 1080 priv = dev->dev_private; 1081 if (msm_host->tx_gem_obj) { 1082 msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace); 1083 drm_gem_object_put_unlocked(msm_host->tx_gem_obj); 1084 msm_host->tx_gem_obj = NULL; 1085 } 1086 1087 if (msm_host->tx_buf) 1088 dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf, 1089 msm_host->tx_buf_paddr); 1090 } 1091 1092 /* 1093 * prepare cmd buffer to be txed 1094 */ 1095 static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, 1096 const struct mipi_dsi_msg *msg) 1097 { 1098 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1099 struct mipi_dsi_packet packet; 1100 int len; 1101 int ret; 1102 u8 *data; 1103 1104 ret = mipi_dsi_create_packet(&packet, msg); 1105 if (ret) { 1106 pr_err("%s: create packet failed, %d\n", __func__, ret); 1107 return ret; 1108 } 1109 len = (packet.size + 3) & (~0x3); 1110 1111 if (len > msm_host->tx_size) { 1112 pr_err("%s: packet size is too big\n", __func__); 1113 return -EINVAL; 1114 } 1115 1116 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { 1117 data = msm_gem_get_vaddr(msm_host->tx_gem_obj); 1118 if (IS_ERR(data)) { 1119 ret = PTR_ERR(data); 1120 pr_err("%s: get vaddr failed, %d\n", __func__, ret); 1121 return ret; 1122 } 1123 } else { 1124 data = msm_host->tx_buf; 1125 } 1126 1127 /* MSM specific command format in memory */ 1128 data[0] = packet.header[1]; 1129 data[1] = packet.header[2]; 1130 data[2] = packet.header[0]; 1131 data[3] = BIT(7); /* Last packet */ 1132 if (mipi_dsi_packet_format_is_long(msg->type)) 1133 data[3] |= BIT(6); 1134 if (msg->rx_buf && msg->rx_len) 1135 data[3] |= BIT(5); 1136 1137 /* Long packet */ 1138 if (packet.payload && packet.payload_length) 1139 memcpy(data + 4, packet.payload, packet.payload_length); 1140 1141 /* Append 0xff to the end */ 1142 if (packet.size < len) 1143 memset(data + packet.size, 0xff, len - packet.size); 1144 1145 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) 1146 msm_gem_put_vaddr(msm_host->tx_gem_obj); 1147 1148 return len; 1149 } 1150 1151 /* 1152 * dsi_short_read1_resp: 1 parameter 1153 */ 1154 static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg) 1155 { 1156 u8 *data = msg->rx_buf; 1157 if (data && (msg->rx_len >= 1)) { 1158 *data = buf[1]; /* strip out dcs type */ 1159 return 1; 1160 } else { 1161 pr_err("%s: read data does not match with rx_buf len %zu\n", 1162 __func__, msg->rx_len); 1163 return -EINVAL; 1164 } 1165 } 1166 1167 /* 1168 * dsi_short_read2_resp: 2 parameter 1169 */ 1170 static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg) 1171 { 1172 u8 *data = msg->rx_buf; 1173 if (data && (msg->rx_len >= 2)) { 1174 data[0] = buf[1]; /* strip out dcs type */ 1175 data[1] = buf[2]; 1176 return 2; 1177 } else { 1178 pr_err("%s: read data does not match with rx_buf len %zu\n", 1179 __func__, msg->rx_len); 1180 return -EINVAL; 1181 } 1182 } 1183 1184 static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg) 1185 { 1186 /* strip out 4 byte dcs header */ 1187 if (msg->rx_buf && msg->rx_len) 1188 memcpy(msg->rx_buf, buf + 4, msg->rx_len); 1189 1190 return msg->rx_len; 1191 } 1192 1193 static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) 1194 { 1195 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1196 struct drm_device *dev = msm_host->dev; 1197 struct msm_drm_private *priv = dev->dev_private; 1198 int ret; 1199 uint64_t dma_base; 1200 bool triggered; 1201 1202 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { 1203 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 1204 priv->kms->aspace, &dma_base); 1205 if (ret) { 1206 pr_err("%s: failed to get iova: %d\n", __func__, ret); 1207 return ret; 1208 } 1209 } else { 1210 dma_base = msm_host->tx_buf_paddr; 1211 } 1212 1213 reinit_completion(&msm_host->dma_comp); 1214 1215 dsi_wait4video_eng_busy(msm_host); 1216 1217 triggered = msm_dsi_manager_cmd_xfer_trigger( 1218 msm_host->id, dma_base, len); 1219 if (triggered) { 1220 ret = wait_for_completion_timeout(&msm_host->dma_comp, 1221 msecs_to_jiffies(200)); 1222 DBG("ret=%d", ret); 1223 if (ret == 0) 1224 ret = -ETIMEDOUT; 1225 else 1226 ret = len; 1227 } else 1228 ret = len; 1229 1230 return ret; 1231 } 1232 1233 static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host, 1234 u8 *buf, int rx_byte, int pkt_size) 1235 { 1236 u32 *lp, *temp, data; 1237 int i, j = 0, cnt; 1238 u32 read_cnt; 1239 u8 reg[16]; 1240 int repeated_bytes = 0; 1241 int buf_offset = buf - msm_host->rx_buf; 1242 1243 lp = (u32 *)buf; 1244 temp = (u32 *)reg; 1245 cnt = (rx_byte + 3) >> 2; 1246 if (cnt > 4) 1247 cnt = 4; /* 4 x 32 bits registers only */ 1248 1249 if (rx_byte == 4) 1250 read_cnt = 4; 1251 else 1252 read_cnt = pkt_size + 6; 1253 1254 /* 1255 * In case of multiple reads from the panel, after the first read, there 1256 * is possibility that there are some bytes in the payload repeating in 1257 * the RDBK_DATA registers. Since we read all the parameters from the 1258 * panel right from the first byte for every pass. We need to skip the 1259 * repeating bytes and then append the new parameters to the rx buffer. 1260 */ 1261 if (read_cnt > 16) { 1262 int bytes_shifted; 1263 /* Any data more than 16 bytes will be shifted out. 1264 * The temp read buffer should already contain these bytes. 1265 * The remaining bytes in read buffer are the repeated bytes. 1266 */ 1267 bytes_shifted = read_cnt - 16; 1268 repeated_bytes = buf_offset - bytes_shifted; 1269 } 1270 1271 for (i = cnt - 1; i >= 0; i--) { 1272 data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i)); 1273 *temp++ = ntohl(data); /* to host byte order */ 1274 DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data)); 1275 } 1276 1277 for (i = repeated_bytes; i < 16; i++) 1278 buf[j++] = reg[i]; 1279 1280 return j; 1281 } 1282 1283 static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host, 1284 const struct mipi_dsi_msg *msg) 1285 { 1286 int len, ret; 1287 int bllp_len = msm_host->mode->hdisplay * 1288 dsi_get_bpp(msm_host->format) / 8; 1289 1290 len = dsi_cmd_dma_add(msm_host, msg); 1291 if (!len) { 1292 pr_err("%s: failed to add cmd type = 0x%x\n", 1293 __func__, msg->type); 1294 return -EINVAL; 1295 } 1296 1297 /* for video mode, do not send cmds more than 1298 * one pixel line, since it only transmit it 1299 * during BLLP. 1300 */ 1301 /* TODO: if the command is sent in LP mode, the bit rate is only 1302 * half of esc clk rate. In this case, if the video is already 1303 * actively streaming, we need to check more carefully if the 1304 * command can be fit into one BLLP. 1305 */ 1306 if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) { 1307 pr_err("%s: cmd cannot fit into BLLP period, len=%d\n", 1308 __func__, len); 1309 return -EINVAL; 1310 } 1311 1312 ret = dsi_cmd_dma_tx(msm_host, len); 1313 if (ret < len) { 1314 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d\n", 1315 __func__, msg->type, (*(u8 *)(msg->tx_buf)), len); 1316 return -ECOMM; 1317 } 1318 1319 return len; 1320 } 1321 1322 static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) 1323 { 1324 u32 data0, data1; 1325 1326 data0 = dsi_read(msm_host, REG_DSI_CTRL); 1327 data1 = data0; 1328 data1 &= ~DSI_CTRL_ENABLE; 1329 dsi_write(msm_host, REG_DSI_CTRL, data1); 1330 /* 1331 * dsi controller need to be disabled before 1332 * clocks turned on 1333 */ 1334 wmb(); 1335 1336 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); 1337 wmb(); /* make sure clocks enabled */ 1338 1339 /* dsi controller can only be reset while clocks are running */ 1340 dsi_write(msm_host, REG_DSI_RESET, 1); 1341 wmb(); /* make sure reset happen */ 1342 dsi_write(msm_host, REG_DSI_RESET, 0); 1343 wmb(); /* controller out of reset */ 1344 dsi_write(msm_host, REG_DSI_CTRL, data0); 1345 wmb(); /* make sure dsi controller enabled again */ 1346 } 1347 1348 static void dsi_hpd_worker(struct work_struct *work) 1349 { 1350 struct msm_dsi_host *msm_host = 1351 container_of(work, struct msm_dsi_host, hpd_work); 1352 1353 drm_helper_hpd_irq_event(msm_host->dev); 1354 } 1355 1356 static void dsi_err_worker(struct work_struct *work) 1357 { 1358 struct msm_dsi_host *msm_host = 1359 container_of(work, struct msm_dsi_host, err_work); 1360 u32 status = msm_host->err_work_state; 1361 1362 pr_err_ratelimited("%s: status=%x\n", __func__, status); 1363 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW) 1364 dsi_sw_reset_restore(msm_host); 1365 1366 /* It is safe to clear here because error irq is disabled. */ 1367 msm_host->err_work_state = 0; 1368 1369 /* enable dsi error interrupt */ 1370 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); 1371 } 1372 1373 static void dsi_ack_err_status(struct msm_dsi_host *msm_host) 1374 { 1375 u32 status; 1376 1377 status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS); 1378 1379 if (status) { 1380 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status); 1381 /* Writing of an extra 0 needed to clear error bits */ 1382 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0); 1383 msm_host->err_work_state |= DSI_ERR_STATE_ACK; 1384 } 1385 } 1386 1387 static void dsi_timeout_status(struct msm_dsi_host *msm_host) 1388 { 1389 u32 status; 1390 1391 status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS); 1392 1393 if (status) { 1394 dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status); 1395 msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT; 1396 } 1397 } 1398 1399 static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host) 1400 { 1401 u32 status; 1402 1403 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); 1404 1405 if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC | 1406 DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC | 1407 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL | 1408 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 | 1409 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) { 1410 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); 1411 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; 1412 } 1413 } 1414 1415 static void dsi_fifo_status(struct msm_dsi_host *msm_host) 1416 { 1417 u32 status; 1418 1419 status = dsi_read(msm_host, REG_DSI_FIFO_STATUS); 1420 1421 /* fifo underflow, overflow */ 1422 if (status) { 1423 dsi_write(msm_host, REG_DSI_FIFO_STATUS, status); 1424 msm_host->err_work_state |= DSI_ERR_STATE_FIFO; 1425 if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW) 1426 msm_host->err_work_state |= 1427 DSI_ERR_STATE_MDP_FIFO_UNDERFLOW; 1428 } 1429 } 1430 1431 static void dsi_status(struct msm_dsi_host *msm_host) 1432 { 1433 u32 status; 1434 1435 status = dsi_read(msm_host, REG_DSI_STATUS0); 1436 1437 if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) { 1438 dsi_write(msm_host, REG_DSI_STATUS0, status); 1439 msm_host->err_work_state |= 1440 DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION; 1441 } 1442 } 1443 1444 static void dsi_clk_status(struct msm_dsi_host *msm_host) 1445 { 1446 u32 status; 1447 1448 status = dsi_read(msm_host, REG_DSI_CLK_STATUS); 1449 1450 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) { 1451 dsi_write(msm_host, REG_DSI_CLK_STATUS, status); 1452 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED; 1453 } 1454 } 1455 1456 static void dsi_error(struct msm_dsi_host *msm_host) 1457 { 1458 /* disable dsi error interrupt */ 1459 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0); 1460 1461 dsi_clk_status(msm_host); 1462 dsi_fifo_status(msm_host); 1463 dsi_ack_err_status(msm_host); 1464 dsi_timeout_status(msm_host); 1465 dsi_status(msm_host); 1466 dsi_dln0_phy_err(msm_host); 1467 1468 queue_work(msm_host->workqueue, &msm_host->err_work); 1469 } 1470 1471 static irqreturn_t dsi_host_irq(int irq, void *ptr) 1472 { 1473 struct msm_dsi_host *msm_host = ptr; 1474 u32 isr; 1475 unsigned long flags; 1476 1477 if (!msm_host->ctrl_base) 1478 return IRQ_HANDLED; 1479 1480 spin_lock_irqsave(&msm_host->intr_lock, flags); 1481 isr = dsi_read(msm_host, REG_DSI_INTR_CTRL); 1482 dsi_write(msm_host, REG_DSI_INTR_CTRL, isr); 1483 spin_unlock_irqrestore(&msm_host->intr_lock, flags); 1484 1485 DBG("isr=0x%x, id=%d", isr, msm_host->id); 1486 1487 if (isr & DSI_IRQ_ERROR) 1488 dsi_error(msm_host); 1489 1490 if (isr & DSI_IRQ_VIDEO_DONE) 1491 complete(&msm_host->video_comp); 1492 1493 if (isr & DSI_IRQ_CMD_DMA_DONE) 1494 complete(&msm_host->dma_comp); 1495 1496 return IRQ_HANDLED; 1497 } 1498 1499 static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host, 1500 struct device *panel_device) 1501 { 1502 msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device, 1503 "disp-enable", 1504 GPIOD_OUT_LOW); 1505 if (IS_ERR(msm_host->disp_en_gpio)) { 1506 DBG("cannot get disp-enable-gpios %ld", 1507 PTR_ERR(msm_host->disp_en_gpio)); 1508 return PTR_ERR(msm_host->disp_en_gpio); 1509 } 1510 1511 msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te", 1512 GPIOD_IN); 1513 if (IS_ERR(msm_host->te_gpio)) { 1514 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); 1515 return PTR_ERR(msm_host->te_gpio); 1516 } 1517 1518 return 0; 1519 } 1520 1521 static int dsi_host_attach(struct mipi_dsi_host *host, 1522 struct mipi_dsi_device *dsi) 1523 { 1524 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1525 int ret; 1526 1527 if (dsi->lanes > msm_host->num_data_lanes) 1528 return -EINVAL; 1529 1530 msm_host->channel = dsi->channel; 1531 msm_host->lanes = dsi->lanes; 1532 msm_host->format = dsi->format; 1533 msm_host->mode_flags = dsi->mode_flags; 1534 1535 msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags); 1536 1537 /* Some gpios defined in panel DT need to be controlled by host */ 1538 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); 1539 if (ret) 1540 return ret; 1541 1542 DBG("id=%d", msm_host->id); 1543 if (msm_host->dev) 1544 queue_work(msm_host->workqueue, &msm_host->hpd_work); 1545 1546 return 0; 1547 } 1548 1549 static int dsi_host_detach(struct mipi_dsi_host *host, 1550 struct mipi_dsi_device *dsi) 1551 { 1552 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1553 1554 msm_host->device_node = NULL; 1555 1556 DBG("id=%d", msm_host->id); 1557 if (msm_host->dev) 1558 queue_work(msm_host->workqueue, &msm_host->hpd_work); 1559 1560 return 0; 1561 } 1562 1563 static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, 1564 const struct mipi_dsi_msg *msg) 1565 { 1566 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1567 int ret; 1568 1569 if (!msg || !msm_host->power_on) 1570 return -EINVAL; 1571 1572 mutex_lock(&msm_host->cmd_mutex); 1573 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg); 1574 mutex_unlock(&msm_host->cmd_mutex); 1575 1576 return ret; 1577 } 1578 1579 static struct mipi_dsi_host_ops dsi_host_ops = { 1580 .attach = dsi_host_attach, 1581 .detach = dsi_host_detach, 1582 .transfer = dsi_host_transfer, 1583 }; 1584 1585 /* 1586 * List of supported physical to logical lane mappings. 1587 * For example, the 2nd entry represents the following mapping: 1588 * 1589 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3; 1590 */ 1591 static const int supported_data_lane_swaps[][4] = { 1592 { 0, 1, 2, 3 }, 1593 { 3, 0, 1, 2 }, 1594 { 2, 3, 0, 1 }, 1595 { 1, 2, 3, 0 }, 1596 { 0, 3, 2, 1 }, 1597 { 1, 0, 3, 2 }, 1598 { 2, 1, 0, 3 }, 1599 { 3, 2, 1, 0 }, 1600 }; 1601 1602 static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, 1603 struct device_node *ep) 1604 { 1605 struct device *dev = &msm_host->pdev->dev; 1606 struct property *prop; 1607 u32 lane_map[4]; 1608 int ret, i, len, num_lanes; 1609 1610 prop = of_find_property(ep, "data-lanes", &len); 1611 if (!prop) { 1612 dev_dbg(dev, 1613 "failed to find data lane mapping, using default\n"); 1614 return 0; 1615 } 1616 1617 num_lanes = len / sizeof(u32); 1618 1619 if (num_lanes < 1 || num_lanes > 4) { 1620 dev_err(dev, "bad number of data lanes\n"); 1621 return -EINVAL; 1622 } 1623 1624 msm_host->num_data_lanes = num_lanes; 1625 1626 ret = of_property_read_u32_array(ep, "data-lanes", lane_map, 1627 num_lanes); 1628 if (ret) { 1629 dev_err(dev, "failed to read lane data\n"); 1630 return ret; 1631 } 1632 1633 /* 1634 * compare DT specified physical-logical lane mappings with the ones 1635 * supported by hardware 1636 */ 1637 for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) { 1638 const int *swap = supported_data_lane_swaps[i]; 1639 int j; 1640 1641 /* 1642 * the data-lanes array we get from DT has a logical->physical 1643 * mapping. The "data lane swap" register field represents 1644 * supported configurations in a physical->logical mapping. 1645 * Translate the DT mapping to what we understand and find a 1646 * configuration that works. 1647 */ 1648 for (j = 0; j < num_lanes; j++) { 1649 if (lane_map[j] < 0 || lane_map[j] > 3) 1650 dev_err(dev, "bad physical lane entry %u\n", 1651 lane_map[j]); 1652 1653 if (swap[lane_map[j]] != j) 1654 break; 1655 } 1656 1657 if (j == num_lanes) { 1658 msm_host->dlane_swap = i; 1659 return 0; 1660 } 1661 } 1662 1663 return -EINVAL; 1664 } 1665 1666 static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) 1667 { 1668 struct device *dev = &msm_host->pdev->dev; 1669 struct device_node *np = dev->of_node; 1670 struct device_node *endpoint, *device_node; 1671 int ret = 0; 1672 1673 /* 1674 * Get the endpoint of the output port of the DSI host. In our case, 1675 * this is mapped to port number with reg = 1. Don't return an error if 1676 * the remote endpoint isn't defined. It's possible that there is 1677 * nothing connected to the dsi output. 1678 */ 1679 endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); 1680 if (!endpoint) { 1681 dev_dbg(dev, "%s: no endpoint\n", __func__); 1682 return 0; 1683 } 1684 1685 ret = dsi_host_parse_lane_data(msm_host, endpoint); 1686 if (ret) { 1687 dev_err(dev, "%s: invalid lane configuration %d\n", 1688 __func__, ret); 1689 goto err; 1690 } 1691 1692 /* Get panel node from the output port's endpoint data */ 1693 device_node = of_graph_get_remote_node(np, 1, 0); 1694 if (!device_node) { 1695 dev_dbg(dev, "%s: no valid device\n", __func__); 1696 goto err; 1697 } 1698 1699 msm_host->device_node = device_node; 1700 1701 if (of_property_read_bool(np, "syscon-sfpb")) { 1702 msm_host->sfpb = syscon_regmap_lookup_by_phandle(np, 1703 "syscon-sfpb"); 1704 if (IS_ERR(msm_host->sfpb)) { 1705 dev_err(dev, "%s: failed to get sfpb regmap\n", 1706 __func__); 1707 ret = PTR_ERR(msm_host->sfpb); 1708 } 1709 } 1710 1711 of_node_put(device_node); 1712 1713 err: 1714 of_node_put(endpoint); 1715 1716 return ret; 1717 } 1718 1719 static int dsi_host_get_id(struct msm_dsi_host *msm_host) 1720 { 1721 struct platform_device *pdev = msm_host->pdev; 1722 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; 1723 struct resource *res; 1724 int i; 1725 1726 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl"); 1727 if (!res) 1728 return -EINVAL; 1729 1730 for (i = 0; i < cfg->num_dsi; i++) { 1731 if (cfg->io_start[i] == res->start) 1732 return i; 1733 } 1734 1735 return -EINVAL; 1736 } 1737 1738 int msm_dsi_host_init(struct msm_dsi *msm_dsi) 1739 { 1740 struct msm_dsi_host *msm_host = NULL; 1741 struct platform_device *pdev = msm_dsi->pdev; 1742 int ret; 1743 1744 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); 1745 if (!msm_host) { 1746 pr_err("%s: FAILED: cannot alloc dsi host\n", 1747 __func__); 1748 ret = -ENOMEM; 1749 goto fail; 1750 } 1751 1752 msm_host->pdev = pdev; 1753 msm_dsi->host = &msm_host->base; 1754 1755 ret = dsi_host_parse_dt(msm_host); 1756 if (ret) { 1757 pr_err("%s: failed to parse dt\n", __func__); 1758 goto fail; 1759 } 1760 1761 msm_host->ctrl_base = msm_ioremap(pdev, "dsi_ctrl", "DSI CTRL"); 1762 if (IS_ERR(msm_host->ctrl_base)) { 1763 pr_err("%s: unable to map Dsi ctrl base\n", __func__); 1764 ret = PTR_ERR(msm_host->ctrl_base); 1765 goto fail; 1766 } 1767 1768 pm_runtime_enable(&pdev->dev); 1769 1770 msm_host->cfg_hnd = dsi_get_config(msm_host); 1771 if (!msm_host->cfg_hnd) { 1772 ret = -EINVAL; 1773 pr_err("%s: get config failed\n", __func__); 1774 goto fail; 1775 } 1776 1777 msm_host->id = dsi_host_get_id(msm_host); 1778 if (msm_host->id < 0) { 1779 ret = msm_host->id; 1780 pr_err("%s: unable to identify DSI host index\n", __func__); 1781 goto fail; 1782 } 1783 1784 /* fixup base address by io offset */ 1785 msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; 1786 1787 ret = dsi_regulator_init(msm_host); 1788 if (ret) { 1789 pr_err("%s: regulator init failed\n", __func__); 1790 goto fail; 1791 } 1792 1793 ret = dsi_clk_init(msm_host); 1794 if (ret) { 1795 pr_err("%s: unable to initialize dsi clks\n", __func__); 1796 goto fail; 1797 } 1798 1799 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); 1800 if (!msm_host->rx_buf) { 1801 ret = -ENOMEM; 1802 pr_err("%s: alloc rx temp buf failed\n", __func__); 1803 goto fail; 1804 } 1805 1806 init_completion(&msm_host->dma_comp); 1807 init_completion(&msm_host->video_comp); 1808 mutex_init(&msm_host->dev_mutex); 1809 mutex_init(&msm_host->cmd_mutex); 1810 spin_lock_init(&msm_host->intr_lock); 1811 1812 /* setup workqueue */ 1813 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); 1814 INIT_WORK(&msm_host->err_work, dsi_err_worker); 1815 INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); 1816 1817 msm_dsi->id = msm_host->id; 1818 1819 DBG("Dsi Host %d initialized", msm_host->id); 1820 return 0; 1821 1822 fail: 1823 return ret; 1824 } 1825 1826 void msm_dsi_host_destroy(struct mipi_dsi_host *host) 1827 { 1828 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1829 1830 DBG(""); 1831 dsi_tx_buf_free(msm_host); 1832 if (msm_host->workqueue) { 1833 flush_workqueue(msm_host->workqueue); 1834 destroy_workqueue(msm_host->workqueue); 1835 msm_host->workqueue = NULL; 1836 } 1837 1838 mutex_destroy(&msm_host->cmd_mutex); 1839 mutex_destroy(&msm_host->dev_mutex); 1840 1841 pm_runtime_disable(&msm_host->pdev->dev); 1842 } 1843 1844 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, 1845 struct drm_device *dev) 1846 { 1847 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1848 struct platform_device *pdev = msm_host->pdev; 1849 int ret; 1850 1851 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 1852 if (msm_host->irq < 0) { 1853 ret = msm_host->irq; 1854 dev_err(dev->dev, "failed to get irq: %d\n", ret); 1855 return ret; 1856 } 1857 1858 ret = devm_request_irq(&pdev->dev, msm_host->irq, 1859 dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 1860 "dsi_isr", msm_host); 1861 if (ret < 0) { 1862 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", 1863 msm_host->irq, ret); 1864 return ret; 1865 } 1866 1867 msm_host->dev = dev; 1868 ret = dsi_tx_buf_alloc(msm_host, SZ_4K); 1869 if (ret) { 1870 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret); 1871 return ret; 1872 } 1873 1874 return 0; 1875 } 1876 1877 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer) 1878 { 1879 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1880 int ret; 1881 1882 /* Register mipi dsi host */ 1883 if (!msm_host->registered) { 1884 host->dev = &msm_host->pdev->dev; 1885 host->ops = &dsi_host_ops; 1886 ret = mipi_dsi_host_register(host); 1887 if (ret) 1888 return ret; 1889 1890 msm_host->registered = true; 1891 1892 /* If the panel driver has not been probed after host register, 1893 * we should defer the host's probe. 1894 * It makes sure panel is connected when fbcon detects 1895 * connector status and gets the proper display mode to 1896 * create framebuffer. 1897 * Don't try to defer if there is nothing connected to the dsi 1898 * output 1899 */ 1900 if (check_defer && msm_host->device_node) { 1901 if (!of_drm_find_panel(msm_host->device_node)) 1902 if (!of_drm_find_bridge(msm_host->device_node)) 1903 return -EPROBE_DEFER; 1904 } 1905 } 1906 1907 return 0; 1908 } 1909 1910 void msm_dsi_host_unregister(struct mipi_dsi_host *host) 1911 { 1912 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1913 1914 if (msm_host->registered) { 1915 mipi_dsi_host_unregister(host); 1916 host->dev = NULL; 1917 host->ops = NULL; 1918 msm_host->registered = false; 1919 } 1920 } 1921 1922 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, 1923 const struct mipi_dsi_msg *msg) 1924 { 1925 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1926 1927 /* TODO: make sure dsi_cmd_mdp is idle. 1928 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME 1929 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed. 1930 * How to handle the old versions? Wait for mdp cmd done? 1931 */ 1932 1933 /* 1934 * mdss interrupt is generated in mdp core clock domain 1935 * mdp clock need to be enabled to receive dsi interrupt 1936 */ 1937 pm_runtime_get_sync(&msm_host->pdev->dev); 1938 dsi_link_clk_enable(msm_host); 1939 1940 /* TODO: vote for bus bandwidth */ 1941 1942 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) 1943 dsi_set_tx_power_mode(0, msm_host); 1944 1945 msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL); 1946 dsi_write(msm_host, REG_DSI_CTRL, 1947 msm_host->dma_cmd_ctrl_restore | 1948 DSI_CTRL_CMD_MODE_EN | 1949 DSI_CTRL_ENABLE); 1950 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1); 1951 1952 return 0; 1953 } 1954 1955 void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, 1956 const struct mipi_dsi_msg *msg) 1957 { 1958 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1959 1960 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0); 1961 dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore); 1962 1963 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) 1964 dsi_set_tx_power_mode(1, msm_host); 1965 1966 /* TODO: unvote for bus bandwidth */ 1967 1968 dsi_link_clk_disable(msm_host); 1969 pm_runtime_put_autosuspend(&msm_host->pdev->dev); 1970 } 1971 1972 int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, 1973 const struct mipi_dsi_msg *msg) 1974 { 1975 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1976 1977 return dsi_cmds2buf_tx(msm_host, msg); 1978 } 1979 1980 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, 1981 const struct mipi_dsi_msg *msg) 1982 { 1983 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1984 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1985 int data_byte, rx_byte, dlen, end; 1986 int short_response, diff, pkt_size, ret = 0; 1987 char cmd; 1988 int rlen = msg->rx_len; 1989 u8 *buf; 1990 1991 if (rlen <= 2) { 1992 short_response = 1; 1993 pkt_size = rlen; 1994 rx_byte = 4; 1995 } else { 1996 short_response = 0; 1997 data_byte = 10; /* first read */ 1998 if (rlen < data_byte) 1999 pkt_size = rlen; 2000 else 2001 pkt_size = data_byte; 2002 rx_byte = data_byte + 6; /* 4 header + 2 crc */ 2003 } 2004 2005 buf = msm_host->rx_buf; 2006 end = 0; 2007 while (!end) { 2008 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8}; 2009 struct mipi_dsi_msg max_pkt_size_msg = { 2010 .channel = msg->channel, 2011 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, 2012 .tx_len = 2, 2013 .tx_buf = tx, 2014 }; 2015 2016 DBG("rlen=%d pkt_size=%d rx_byte=%d", 2017 rlen, pkt_size, rx_byte); 2018 2019 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg); 2020 if (ret < 2) { 2021 pr_err("%s: Set max pkt size failed, %d\n", 2022 __func__, ret); 2023 return -EINVAL; 2024 } 2025 2026 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && 2027 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { 2028 /* Clear the RDBK_DATA registers */ 2029 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 2030 DSI_RDBK_DATA_CTRL_CLR); 2031 wmb(); /* make sure the RDBK registers are cleared */ 2032 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0); 2033 wmb(); /* release cleared status before transfer */ 2034 } 2035 2036 ret = dsi_cmds2buf_tx(msm_host, msg); 2037 if (ret < msg->tx_len) { 2038 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret); 2039 return ret; 2040 } 2041 2042 /* 2043 * once cmd_dma_done interrupt received, 2044 * return data from client is ready and stored 2045 * at RDBK_DATA register already 2046 * since rx fifo is 16 bytes, dcs header is kept at first loop, 2047 * after that dcs header lost during shift into registers 2048 */ 2049 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size); 2050 2051 if (dlen <= 0) 2052 return 0; 2053 2054 if (short_response) 2055 break; 2056 2057 if (rlen <= data_byte) { 2058 diff = data_byte - rlen; 2059 end = 1; 2060 } else { 2061 diff = 0; 2062 rlen -= data_byte; 2063 } 2064 2065 if (!end) { 2066 dlen -= 2; /* 2 crc */ 2067 dlen -= diff; 2068 buf += dlen; /* next start position */ 2069 data_byte = 14; /* NOT first read */ 2070 if (rlen < data_byte) 2071 pkt_size += rlen; 2072 else 2073 pkt_size += data_byte; 2074 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff); 2075 } 2076 } 2077 2078 /* 2079 * For single Long read, if the requested rlen < 10, 2080 * we need to shift the start position of rx 2081 * data buffer to skip the bytes which are not 2082 * updated. 2083 */ 2084 if (pkt_size < 10 && !short_response) 2085 buf = msm_host->rx_buf + (10 - rlen); 2086 else 2087 buf = msm_host->rx_buf; 2088 2089 cmd = buf[0]; 2090 switch (cmd) { 2091 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: 2092 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__); 2093 ret = 0; 2094 break; 2095 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: 2096 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: 2097 ret = dsi_short_read1_resp(buf, msg); 2098 break; 2099 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: 2100 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: 2101 ret = dsi_short_read2_resp(buf, msg); 2102 break; 2103 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE: 2104 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE: 2105 ret = dsi_long_read_resp(buf, msg); 2106 break; 2107 default: 2108 pr_warn("%s:Invalid response cmd\n", __func__); 2109 ret = 0; 2110 } 2111 2112 return ret; 2113 } 2114 2115 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, 2116 u32 len) 2117 { 2118 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2119 2120 dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base); 2121 dsi_write(msm_host, REG_DSI_DMA_LEN, len); 2122 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1); 2123 2124 /* Make sure trigger happens */ 2125 wmb(); 2126 } 2127 2128 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, 2129 struct msm_dsi_pll *src_pll) 2130 { 2131 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2132 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2133 struct clk *byte_clk_provider, *pixel_clk_provider; 2134 int ret; 2135 2136 ret = msm_dsi_pll_get_clk_provider(src_pll, 2137 &byte_clk_provider, &pixel_clk_provider); 2138 if (ret) { 2139 pr_info("%s: can't get provider from pll, don't set parent\n", 2140 __func__); 2141 return 0; 2142 } 2143 2144 ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider); 2145 if (ret) { 2146 pr_err("%s: can't set parent to byte_clk_src. ret=%d\n", 2147 __func__, ret); 2148 goto exit; 2149 } 2150 2151 ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider); 2152 if (ret) { 2153 pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n", 2154 __func__, ret); 2155 goto exit; 2156 } 2157 2158 if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) { 2159 ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider); 2160 if (ret) { 2161 pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n", 2162 __func__, ret); 2163 goto exit; 2164 } 2165 2166 ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider); 2167 if (ret) { 2168 pr_err("%s: can't set parent to esc_clk_src. ret=%d\n", 2169 __func__, ret); 2170 goto exit; 2171 } 2172 } 2173 2174 exit: 2175 return ret; 2176 } 2177 2178 void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) 2179 { 2180 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2181 2182 DBG(""); 2183 dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); 2184 /* Make sure fully reset */ 2185 wmb(); 2186 udelay(1000); 2187 dsi_write(msm_host, REG_DSI_PHY_RESET, 0); 2188 udelay(100); 2189 } 2190 2191 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, 2192 struct msm_dsi_phy_clk_request *clk_req) 2193 { 2194 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2195 int ret; 2196 2197 ret = dsi_calc_clk_rate(msm_host); 2198 if (ret) { 2199 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2200 return; 2201 } 2202 2203 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2204 clk_req->escclk_rate = msm_host->esc_clk_rate; 2205 } 2206 2207 int msm_dsi_host_enable(struct mipi_dsi_host *host) 2208 { 2209 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2210 2211 dsi_op_mode_config(msm_host, 2212 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true); 2213 2214 /* TODO: clock should be turned off for command mode, 2215 * and only turned on before MDP START. 2216 * This part of code should be enabled once mdp driver support it. 2217 */ 2218 /* if (msm_panel->mode == MSM_DSI_CMD_MODE) { 2219 * dsi_link_clk_disable(msm_host); 2220 * pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2221 * } 2222 */ 2223 msm_host->enabled = true; 2224 return 0; 2225 } 2226 2227 int msm_dsi_host_disable(struct mipi_dsi_host *host) 2228 { 2229 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2230 2231 msm_host->enabled = false; 2232 dsi_op_mode_config(msm_host, 2233 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 2234 2235 /* Since we have disabled INTF, the video engine won't stop so that 2236 * the cmd engine will be blocked. 2237 * Reset to disable video engine so that we can send off cmd. 2238 */ 2239 dsi_sw_reset(msm_host); 2240 2241 return 0; 2242 } 2243 2244 static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) 2245 { 2246 enum sfpb_ahb_arb_master_port_en en; 2247 2248 if (!msm_host->sfpb) 2249 return; 2250 2251 en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE; 2252 2253 regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG, 2254 SFPB_GPREG_MASTER_PORT_EN__MASK, 2255 SFPB_GPREG_MASTER_PORT_EN(en)); 2256 } 2257 2258 int msm_dsi_host_power_on(struct mipi_dsi_host *host, 2259 struct msm_dsi_phy_shared_timings *phy_shared_timings) 2260 { 2261 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2262 int ret = 0; 2263 2264 mutex_lock(&msm_host->dev_mutex); 2265 if (msm_host->power_on) { 2266 DBG("dsi host already on"); 2267 goto unlock_ret; 2268 } 2269 2270 msm_dsi_sfpb_config(msm_host, true); 2271 2272 ret = dsi_host_regulator_enable(msm_host); 2273 if (ret) { 2274 pr_err("%s:Failed to enable vregs.ret=%d\n", 2275 __func__, ret); 2276 goto unlock_ret; 2277 } 2278 2279 pm_runtime_get_sync(&msm_host->pdev->dev); 2280 ret = dsi_link_clk_enable(msm_host); 2281 if (ret) { 2282 pr_err("%s: failed to enable link clocks. ret=%d\n", 2283 __func__, ret); 2284 goto fail_disable_reg; 2285 } 2286 2287 ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev); 2288 if (ret) { 2289 pr_err("%s: failed to set pinctrl default state, %d\n", 2290 __func__, ret); 2291 goto fail_disable_clk; 2292 } 2293 2294 dsi_timing_setup(msm_host); 2295 dsi_sw_reset(msm_host); 2296 dsi_ctrl_config(msm_host, true, phy_shared_timings); 2297 2298 if (msm_host->disp_en_gpio) 2299 gpiod_set_value(msm_host->disp_en_gpio, 1); 2300 2301 msm_host->power_on = true; 2302 mutex_unlock(&msm_host->dev_mutex); 2303 2304 return 0; 2305 2306 fail_disable_clk: 2307 dsi_link_clk_disable(msm_host); 2308 pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2309 fail_disable_reg: 2310 dsi_host_regulator_disable(msm_host); 2311 unlock_ret: 2312 mutex_unlock(&msm_host->dev_mutex); 2313 return ret; 2314 } 2315 2316 int msm_dsi_host_power_off(struct mipi_dsi_host *host) 2317 { 2318 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2319 2320 mutex_lock(&msm_host->dev_mutex); 2321 if (!msm_host->power_on) { 2322 DBG("dsi host already off"); 2323 goto unlock_ret; 2324 } 2325 2326 dsi_ctrl_config(msm_host, false, NULL); 2327 2328 if (msm_host->disp_en_gpio) 2329 gpiod_set_value(msm_host->disp_en_gpio, 0); 2330 2331 pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); 2332 2333 dsi_link_clk_disable(msm_host); 2334 pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2335 2336 dsi_host_regulator_disable(msm_host); 2337 2338 msm_dsi_sfpb_config(msm_host, false); 2339 2340 DBG("-"); 2341 2342 msm_host->power_on = false; 2343 2344 unlock_ret: 2345 mutex_unlock(&msm_host->dev_mutex); 2346 return 0; 2347 } 2348 2349 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 2350 struct drm_display_mode *mode) 2351 { 2352 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2353 2354 if (msm_host->mode) { 2355 drm_mode_destroy(msm_host->dev, msm_host->mode); 2356 msm_host->mode = NULL; 2357 } 2358 2359 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode); 2360 if (!msm_host->mode) { 2361 pr_err("%s: cannot duplicate mode\n", __func__); 2362 return -ENOMEM; 2363 } 2364 2365 return 0; 2366 } 2367 2368 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host, 2369 unsigned long *panel_flags) 2370 { 2371 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2372 struct drm_panel *panel; 2373 2374 panel = of_drm_find_panel(msm_host->device_node); 2375 if (panel_flags) 2376 *panel_flags = msm_host->mode_flags; 2377 2378 return panel; 2379 } 2380 2381 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host) 2382 { 2383 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2384 2385 return of_drm_find_bridge(msm_host->device_node); 2386 } 2387