1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2015, The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/clk.h> 7 #include <linux/delay.h> 8 #include <linux/dma-mapping.h> 9 #include <linux/err.h> 10 #include <linux/gpio/consumer.h> 11 #include <linux/interrupt.h> 12 #include <linux/mfd/syscon.h> 13 #include <linux/of_device.h> 14 #include <linux/of_graph.h> 15 #include <linux/of_irq.h> 16 #include <linux/pinctrl/consumer.h> 17 #include <linux/pm_opp.h> 18 #include <linux/regmap.h> 19 #include <linux/regulator/consumer.h> 20 #include <linux/spinlock.h> 21 22 #include <video/mipi_display.h> 23 24 #include <drm/drm_of.h> 25 26 #include "dsi.h" 27 #include "dsi.xml.h" 28 #include "sfpb.xml.h" 29 #include "dsi_cfg.h" 30 #include "msm_kms.h" 31 #include "msm_gem.h" 32 #include "phy/dsi_phy.h" 33 34 #define DSI_RESET_TOGGLE_DELAY_MS 20 35 36 static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc); 37 38 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor) 39 { 40 u32 ver; 41 42 if (!major || !minor) 43 return -EINVAL; 44 45 /* 46 * From DSI6G(v3), addition of a 6G_HW_VERSION register at offset 0 47 * makes all other registers 4-byte shifted down. 48 * 49 * In order to identify between DSI6G(v3) and beyond, and DSIv2 and 50 * older, we read the DSI_VERSION register without any shift(offset 51 * 0x1f0). In the case of DSIv2, this hast to be a non-zero value. In 52 * the case of DSI6G, this has to be zero (the offset points to a 53 * scratch register which we never touch) 54 */ 55 56 ver = msm_readl(base + REG_DSI_VERSION); 57 if (ver) { 58 /* older dsi host, there is no register shift */ 59 ver = FIELD(ver, DSI_VERSION_MAJOR); 60 if (ver <= MSM_DSI_VER_MAJOR_V2) { 61 /* old versions */ 62 *major = ver; 63 *minor = 0; 64 return 0; 65 } else { 66 return -EINVAL; 67 } 68 } else { 69 /* 70 * newer host, offset 0 has 6G_HW_VERSION, the rest of the 71 * registers are shifted down, read DSI_VERSION again with 72 * the shifted offset 73 */ 74 ver = msm_readl(base + DSI_6G_REG_SHIFT + REG_DSI_VERSION); 75 ver = FIELD(ver, DSI_VERSION_MAJOR); 76 if (ver == MSM_DSI_VER_MAJOR_6G) { 77 /* 6G version */ 78 *major = ver; 79 *minor = msm_readl(base + REG_DSI_6G_HW_VERSION); 80 return 0; 81 } else { 82 return -EINVAL; 83 } 84 } 85 } 86 87 #define DSI_ERR_STATE_ACK 0x0000 88 #define DSI_ERR_STATE_TIMEOUT 0x0001 89 #define DSI_ERR_STATE_DLN0_PHY 0x0002 90 #define DSI_ERR_STATE_FIFO 0x0004 91 #define DSI_ERR_STATE_MDP_FIFO_UNDERFLOW 0x0008 92 #define DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION 0x0010 93 #define DSI_ERR_STATE_PLL_UNLOCKED 0x0020 94 95 #define DSI_CLK_CTRL_ENABLE_CLKS \ 96 (DSI_CLK_CTRL_AHBS_HCLK_ON | DSI_CLK_CTRL_AHBM_SCLK_ON | \ 97 DSI_CLK_CTRL_PCLK_ON | DSI_CLK_CTRL_DSICLK_ON | \ 98 DSI_CLK_CTRL_BYTECLK_ON | DSI_CLK_CTRL_ESCCLK_ON | \ 99 DSI_CLK_CTRL_FORCE_ON_DYN_AHBM_HCLK) 100 101 struct msm_dsi_host { 102 struct mipi_dsi_host base; 103 104 struct platform_device *pdev; 105 struct drm_device *dev; 106 107 int id; 108 109 void __iomem *ctrl_base; 110 phys_addr_t ctrl_size; 111 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX]; 112 113 int num_bus_clks; 114 struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX]; 115 116 struct clk *byte_clk; 117 struct clk *esc_clk; 118 struct clk *pixel_clk; 119 struct clk *byte_clk_src; 120 struct clk *pixel_clk_src; 121 struct clk *byte_intf_clk; 122 123 unsigned long byte_clk_rate; 124 unsigned long pixel_clk_rate; 125 unsigned long esc_clk_rate; 126 127 /* DSI v2 specific clocks */ 128 struct clk *src_clk; 129 struct clk *esc_clk_src; 130 struct clk *dsi_clk_src; 131 132 unsigned long src_clk_rate; 133 134 struct gpio_desc *disp_en_gpio; 135 struct gpio_desc *te_gpio; 136 137 const struct msm_dsi_cfg_handler *cfg_hnd; 138 139 struct completion dma_comp; 140 struct completion video_comp; 141 struct mutex dev_mutex; 142 struct mutex cmd_mutex; 143 spinlock_t intr_lock; /* Protect interrupt ctrl register */ 144 145 u32 err_work_state; 146 struct work_struct err_work; 147 struct work_struct hpd_work; 148 struct workqueue_struct *workqueue; 149 150 /* DSI 6G TX buffer*/ 151 struct drm_gem_object *tx_gem_obj; 152 153 /* DSI v2 TX buffer */ 154 void *tx_buf; 155 dma_addr_t tx_buf_paddr; 156 157 int tx_size; 158 159 u8 *rx_buf; 160 161 struct regmap *sfpb; 162 163 struct drm_display_mode *mode; 164 struct msm_display_dsc_config *dsc; 165 166 /* connected device info */ 167 struct device_node *device_node; 168 unsigned int channel; 169 unsigned int lanes; 170 enum mipi_dsi_pixel_format format; 171 unsigned long mode_flags; 172 173 /* lane data parsed via DT */ 174 int dlane_swap; 175 int num_data_lanes; 176 177 /* from phy DT */ 178 bool cphy_mode; 179 180 u32 dma_cmd_ctrl_restore; 181 182 bool registered; 183 bool power_on; 184 bool enabled; 185 int irq; 186 }; 187 188 static u32 dsi_get_bpp(const enum mipi_dsi_pixel_format fmt) 189 { 190 switch (fmt) { 191 case MIPI_DSI_FMT_RGB565: return 16; 192 case MIPI_DSI_FMT_RGB666_PACKED: return 18; 193 case MIPI_DSI_FMT_RGB666: 194 case MIPI_DSI_FMT_RGB888: 195 default: return 24; 196 } 197 } 198 199 static inline u32 dsi_read(struct msm_dsi_host *msm_host, u32 reg) 200 { 201 return msm_readl(msm_host->ctrl_base + reg); 202 } 203 static inline void dsi_write(struct msm_dsi_host *msm_host, u32 reg, u32 data) 204 { 205 msm_writel(data, msm_host->ctrl_base + reg); 206 } 207 208 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host); 209 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host); 210 211 static const struct msm_dsi_cfg_handler *dsi_get_config( 212 struct msm_dsi_host *msm_host) 213 { 214 const struct msm_dsi_cfg_handler *cfg_hnd = NULL; 215 struct device *dev = &msm_host->pdev->dev; 216 struct clk *ahb_clk; 217 int ret; 218 u32 major = 0, minor = 0; 219 220 cfg_hnd = device_get_match_data(dev); 221 if (cfg_hnd) 222 return cfg_hnd; 223 224 ahb_clk = msm_clk_get(msm_host->pdev, "iface"); 225 if (IS_ERR(ahb_clk)) { 226 pr_err("%s: cannot get interface clock\n", __func__); 227 goto exit; 228 } 229 230 pm_runtime_get_sync(dev); 231 232 ret = clk_prepare_enable(ahb_clk); 233 if (ret) { 234 pr_err("%s: unable to enable ahb_clk\n", __func__); 235 goto runtime_put; 236 } 237 238 ret = dsi_get_version(msm_host->ctrl_base, &major, &minor); 239 if (ret) { 240 pr_err("%s: Invalid version\n", __func__); 241 goto disable_clks; 242 } 243 244 cfg_hnd = msm_dsi_cfg_get(major, minor); 245 246 DBG("%s: Version %x:%x\n", __func__, major, minor); 247 248 disable_clks: 249 clk_disable_unprepare(ahb_clk); 250 runtime_put: 251 pm_runtime_put_sync(dev); 252 exit: 253 return cfg_hnd; 254 } 255 256 static inline struct msm_dsi_host *to_msm_dsi_host(struct mipi_dsi_host *host) 257 { 258 return container_of(host, struct msm_dsi_host, base); 259 } 260 261 static void dsi_host_regulator_disable(struct msm_dsi_host *msm_host) 262 { 263 struct regulator_bulk_data *s = msm_host->supplies; 264 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; 265 int num = msm_host->cfg_hnd->cfg->reg_cfg.num; 266 int i; 267 268 DBG(""); 269 for (i = num - 1; i >= 0; i--) 270 if (regs[i].disable_load >= 0) 271 regulator_set_load(s[i].consumer, 272 regs[i].disable_load); 273 274 regulator_bulk_disable(num, s); 275 } 276 277 static int dsi_host_regulator_enable(struct msm_dsi_host *msm_host) 278 { 279 struct regulator_bulk_data *s = msm_host->supplies; 280 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; 281 int num = msm_host->cfg_hnd->cfg->reg_cfg.num; 282 int ret, i; 283 284 DBG(""); 285 for (i = 0; i < num; i++) { 286 if (regs[i].enable_load >= 0) { 287 ret = regulator_set_load(s[i].consumer, 288 regs[i].enable_load); 289 if (ret < 0) { 290 pr_err("regulator %d set op mode failed, %d\n", 291 i, ret); 292 goto fail; 293 } 294 } 295 } 296 297 ret = regulator_bulk_enable(num, s); 298 if (ret < 0) { 299 pr_err("regulator enable failed, %d\n", ret); 300 goto fail; 301 } 302 303 return 0; 304 305 fail: 306 for (i--; i >= 0; i--) 307 regulator_set_load(s[i].consumer, regs[i].disable_load); 308 return ret; 309 } 310 311 static int dsi_regulator_init(struct msm_dsi_host *msm_host) 312 { 313 struct regulator_bulk_data *s = msm_host->supplies; 314 const struct dsi_reg_entry *regs = msm_host->cfg_hnd->cfg->reg_cfg.regs; 315 int num = msm_host->cfg_hnd->cfg->reg_cfg.num; 316 int i, ret; 317 318 for (i = 0; i < num; i++) 319 s[i].supply = regs[i].name; 320 321 ret = devm_regulator_bulk_get(&msm_host->pdev->dev, num, s); 322 if (ret < 0) { 323 pr_err("%s: failed to init regulator, ret=%d\n", 324 __func__, ret); 325 return ret; 326 } 327 328 return 0; 329 } 330 331 int dsi_clk_init_v2(struct msm_dsi_host *msm_host) 332 { 333 struct platform_device *pdev = msm_host->pdev; 334 int ret = 0; 335 336 msm_host->src_clk = msm_clk_get(pdev, "src"); 337 338 if (IS_ERR(msm_host->src_clk)) { 339 ret = PTR_ERR(msm_host->src_clk); 340 pr_err("%s: can't find src clock. ret=%d\n", 341 __func__, ret); 342 msm_host->src_clk = NULL; 343 return ret; 344 } 345 346 msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk); 347 if (!msm_host->esc_clk_src) { 348 ret = -ENODEV; 349 pr_err("%s: can't get esc clock parent. ret=%d\n", 350 __func__, ret); 351 return ret; 352 } 353 354 msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk); 355 if (!msm_host->dsi_clk_src) { 356 ret = -ENODEV; 357 pr_err("%s: can't get src clock parent. ret=%d\n", 358 __func__, ret); 359 } 360 361 return ret; 362 } 363 364 int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host) 365 { 366 struct platform_device *pdev = msm_host->pdev; 367 int ret = 0; 368 369 msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf"); 370 if (IS_ERR(msm_host->byte_intf_clk)) { 371 ret = PTR_ERR(msm_host->byte_intf_clk); 372 pr_err("%s: can't find byte_intf clock. ret=%d\n", 373 __func__, ret); 374 } 375 376 return ret; 377 } 378 379 static int dsi_clk_init(struct msm_dsi_host *msm_host) 380 { 381 struct platform_device *pdev = msm_host->pdev; 382 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 383 const struct msm_dsi_config *cfg = cfg_hnd->cfg; 384 int i, ret = 0; 385 386 /* get bus clocks */ 387 for (i = 0; i < cfg->num_bus_clks; i++) 388 msm_host->bus_clks[i].id = cfg->bus_clk_names[i]; 389 msm_host->num_bus_clks = cfg->num_bus_clks; 390 391 ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks); 392 if (ret < 0) { 393 dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret); 394 goto exit; 395 } 396 397 /* get link and source clocks */ 398 msm_host->byte_clk = msm_clk_get(pdev, "byte"); 399 if (IS_ERR(msm_host->byte_clk)) { 400 ret = PTR_ERR(msm_host->byte_clk); 401 pr_err("%s: can't find dsi_byte clock. ret=%d\n", 402 __func__, ret); 403 msm_host->byte_clk = NULL; 404 goto exit; 405 } 406 407 msm_host->pixel_clk = msm_clk_get(pdev, "pixel"); 408 if (IS_ERR(msm_host->pixel_clk)) { 409 ret = PTR_ERR(msm_host->pixel_clk); 410 pr_err("%s: can't find dsi_pixel clock. ret=%d\n", 411 __func__, ret); 412 msm_host->pixel_clk = NULL; 413 goto exit; 414 } 415 416 msm_host->esc_clk = msm_clk_get(pdev, "core"); 417 if (IS_ERR(msm_host->esc_clk)) { 418 ret = PTR_ERR(msm_host->esc_clk); 419 pr_err("%s: can't find dsi_esc clock. ret=%d\n", 420 __func__, ret); 421 msm_host->esc_clk = NULL; 422 goto exit; 423 } 424 425 msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk); 426 if (IS_ERR(msm_host->byte_clk_src)) { 427 ret = PTR_ERR(msm_host->byte_clk_src); 428 pr_err("%s: can't find byte_clk clock. ret=%d\n", __func__, ret); 429 goto exit; 430 } 431 432 msm_host->pixel_clk_src = clk_get_parent(msm_host->pixel_clk); 433 if (IS_ERR(msm_host->pixel_clk_src)) { 434 ret = PTR_ERR(msm_host->pixel_clk_src); 435 pr_err("%s: can't find pixel_clk clock. ret=%d\n", __func__, ret); 436 goto exit; 437 } 438 439 if (cfg_hnd->ops->clk_init_ver) 440 ret = cfg_hnd->ops->clk_init_ver(msm_host); 441 exit: 442 return ret; 443 } 444 445 int msm_dsi_runtime_suspend(struct device *dev) 446 { 447 struct platform_device *pdev = to_platform_device(dev); 448 struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); 449 struct mipi_dsi_host *host = msm_dsi->host; 450 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 451 452 if (!msm_host->cfg_hnd) 453 return 0; 454 455 clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks); 456 457 return 0; 458 } 459 460 int msm_dsi_runtime_resume(struct device *dev) 461 { 462 struct platform_device *pdev = to_platform_device(dev); 463 struct msm_dsi *msm_dsi = platform_get_drvdata(pdev); 464 struct mipi_dsi_host *host = msm_dsi->host; 465 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 466 467 if (!msm_host->cfg_hnd) 468 return 0; 469 470 return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks); 471 } 472 473 int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host) 474 { 475 unsigned long byte_intf_rate; 476 int ret; 477 478 DBG("Set clk rates: pclk=%d, byteclk=%lu", 479 msm_host->mode->clock, msm_host->byte_clk_rate); 480 481 ret = dev_pm_opp_set_rate(&msm_host->pdev->dev, 482 msm_host->byte_clk_rate); 483 if (ret) { 484 pr_err("%s: dev_pm_opp_set_rate failed %d\n", __func__, ret); 485 return ret; 486 } 487 488 ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); 489 if (ret) { 490 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); 491 return ret; 492 } 493 494 if (msm_host->byte_intf_clk) { 495 /* For CPHY, byte_intf_clk is same as byte_clk */ 496 if (msm_host->cphy_mode) 497 byte_intf_rate = msm_host->byte_clk_rate; 498 else 499 byte_intf_rate = msm_host->byte_clk_rate / 2; 500 501 ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate); 502 if (ret) { 503 pr_err("%s: Failed to set rate byte intf clk, %d\n", 504 __func__, ret); 505 return ret; 506 } 507 } 508 509 return 0; 510 } 511 512 513 int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host) 514 { 515 int ret; 516 517 ret = clk_prepare_enable(msm_host->esc_clk); 518 if (ret) { 519 pr_err("%s: Failed to enable dsi esc clk\n", __func__); 520 goto error; 521 } 522 523 ret = clk_prepare_enable(msm_host->byte_clk); 524 if (ret) { 525 pr_err("%s: Failed to enable dsi byte clk\n", __func__); 526 goto byte_clk_err; 527 } 528 529 ret = clk_prepare_enable(msm_host->pixel_clk); 530 if (ret) { 531 pr_err("%s: Failed to enable dsi pixel clk\n", __func__); 532 goto pixel_clk_err; 533 } 534 535 ret = clk_prepare_enable(msm_host->byte_intf_clk); 536 if (ret) { 537 pr_err("%s: Failed to enable byte intf clk\n", 538 __func__); 539 goto byte_intf_clk_err; 540 } 541 542 return 0; 543 544 byte_intf_clk_err: 545 clk_disable_unprepare(msm_host->pixel_clk); 546 pixel_clk_err: 547 clk_disable_unprepare(msm_host->byte_clk); 548 byte_clk_err: 549 clk_disable_unprepare(msm_host->esc_clk); 550 error: 551 return ret; 552 } 553 554 int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host) 555 { 556 int ret; 557 558 DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu", 559 msm_host->mode->clock, msm_host->byte_clk_rate, 560 msm_host->esc_clk_rate, msm_host->src_clk_rate); 561 562 ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate); 563 if (ret) { 564 pr_err("%s: Failed to set rate byte clk, %d\n", __func__, ret); 565 return ret; 566 } 567 568 ret = clk_set_rate(msm_host->esc_clk, msm_host->esc_clk_rate); 569 if (ret) { 570 pr_err("%s: Failed to set rate esc clk, %d\n", __func__, ret); 571 return ret; 572 } 573 574 ret = clk_set_rate(msm_host->src_clk, msm_host->src_clk_rate); 575 if (ret) { 576 pr_err("%s: Failed to set rate src clk, %d\n", __func__, ret); 577 return ret; 578 } 579 580 ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate); 581 if (ret) { 582 pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret); 583 return ret; 584 } 585 586 return 0; 587 } 588 589 int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host) 590 { 591 int ret; 592 593 ret = clk_prepare_enable(msm_host->byte_clk); 594 if (ret) { 595 pr_err("%s: Failed to enable dsi byte clk\n", __func__); 596 goto error; 597 } 598 599 ret = clk_prepare_enable(msm_host->esc_clk); 600 if (ret) { 601 pr_err("%s: Failed to enable dsi esc clk\n", __func__); 602 goto esc_clk_err; 603 } 604 605 ret = clk_prepare_enable(msm_host->src_clk); 606 if (ret) { 607 pr_err("%s: Failed to enable dsi src clk\n", __func__); 608 goto src_clk_err; 609 } 610 611 ret = clk_prepare_enable(msm_host->pixel_clk); 612 if (ret) { 613 pr_err("%s: Failed to enable dsi pixel clk\n", __func__); 614 goto pixel_clk_err; 615 } 616 617 return 0; 618 619 pixel_clk_err: 620 clk_disable_unprepare(msm_host->src_clk); 621 src_clk_err: 622 clk_disable_unprepare(msm_host->esc_clk); 623 esc_clk_err: 624 clk_disable_unprepare(msm_host->byte_clk); 625 error: 626 return ret; 627 } 628 629 void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host) 630 { 631 /* Drop the performance state vote */ 632 dev_pm_opp_set_rate(&msm_host->pdev->dev, 0); 633 clk_disable_unprepare(msm_host->esc_clk); 634 clk_disable_unprepare(msm_host->pixel_clk); 635 clk_disable_unprepare(msm_host->byte_intf_clk); 636 clk_disable_unprepare(msm_host->byte_clk); 637 } 638 639 void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host) 640 { 641 clk_disable_unprepare(msm_host->pixel_clk); 642 clk_disable_unprepare(msm_host->src_clk); 643 clk_disable_unprepare(msm_host->esc_clk); 644 clk_disable_unprepare(msm_host->byte_clk); 645 } 646 647 static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 648 { 649 struct drm_display_mode *mode = msm_host->mode; 650 unsigned long pclk_rate; 651 652 pclk_rate = mode->clock * 1000; 653 654 /* 655 * For bonded DSI mode, the current DRM mode has the complete width of the 656 * panel. Since, the complete panel is driven by two DSI controllers, 657 * the clock rates have to be split between the two dsi controllers. 658 * Adjust the byte and pixel clock rates for each dsi host accordingly. 659 */ 660 if (is_bonded_dsi) 661 pclk_rate /= 2; 662 663 return pclk_rate; 664 } 665 666 static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 667 { 668 u8 lanes = msm_host->lanes; 669 u32 bpp = dsi_get_bpp(msm_host->format); 670 unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi); 671 u64 pclk_bpp = (u64)pclk_rate * bpp; 672 673 if (lanes == 0) { 674 pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__); 675 lanes = 1; 676 } 677 678 /* CPHY "byte_clk" is in units of 16 bits */ 679 if (msm_host->cphy_mode) 680 do_div(pclk_bpp, (16 * lanes)); 681 else 682 do_div(pclk_bpp, (8 * lanes)); 683 684 msm_host->pixel_clk_rate = pclk_rate; 685 msm_host->byte_clk_rate = pclk_bpp; 686 687 DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate, 688 msm_host->byte_clk_rate); 689 690 } 691 692 int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 693 { 694 if (!msm_host->mode) { 695 pr_err("%s: mode not set\n", __func__); 696 return -EINVAL; 697 } 698 699 dsi_calc_pclk(msm_host, is_bonded_dsi); 700 msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk); 701 return 0; 702 } 703 704 int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 705 { 706 u32 bpp = dsi_get_bpp(msm_host->format); 707 u64 pclk_bpp; 708 unsigned int esc_mhz, esc_div; 709 unsigned long byte_mhz; 710 711 dsi_calc_pclk(msm_host, is_bonded_dsi); 712 713 pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp; 714 do_div(pclk_bpp, 8); 715 msm_host->src_clk_rate = pclk_bpp; 716 717 /* 718 * esc clock is byte clock followed by a 4 bit divider, 719 * we need to find an escape clock frequency within the 720 * mipi DSI spec range within the maximum divider limit 721 * We iterate here between an escape clock frequencey 722 * between 20 Mhz to 5 Mhz and pick up the first one 723 * that can be supported by our divider 724 */ 725 726 byte_mhz = msm_host->byte_clk_rate / 1000000; 727 728 for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) { 729 esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz); 730 731 /* 732 * TODO: Ideally, we shouldn't know what sort of divider 733 * is available in mmss_cc, we're just assuming that 734 * it'll always be a 4 bit divider. Need to come up with 735 * a better way here. 736 */ 737 if (esc_div >= 1 && esc_div <= 16) 738 break; 739 } 740 741 if (esc_mhz < 5) 742 return -EINVAL; 743 744 msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div; 745 746 DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate, 747 msm_host->src_clk_rate); 748 749 return 0; 750 } 751 752 static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) 753 { 754 u32 intr; 755 unsigned long flags; 756 757 spin_lock_irqsave(&msm_host->intr_lock, flags); 758 intr = dsi_read(msm_host, REG_DSI_INTR_CTRL); 759 760 if (enable) 761 intr |= mask; 762 else 763 intr &= ~mask; 764 765 DBG("intr=%x enable=%d", intr, enable); 766 767 dsi_write(msm_host, REG_DSI_INTR_CTRL, intr); 768 spin_unlock_irqrestore(&msm_host->intr_lock, flags); 769 } 770 771 static inline enum dsi_traffic_mode dsi_get_traffic_mode(const u32 mode_flags) 772 { 773 if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) 774 return BURST_MODE; 775 else if (mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) 776 return NON_BURST_SYNCH_PULSE; 777 778 return NON_BURST_SYNCH_EVENT; 779 } 780 781 static inline enum dsi_vid_dst_format dsi_get_vid_fmt( 782 const enum mipi_dsi_pixel_format mipi_fmt) 783 { 784 switch (mipi_fmt) { 785 case MIPI_DSI_FMT_RGB888: return VID_DST_FORMAT_RGB888; 786 case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666_LOOSE; 787 case MIPI_DSI_FMT_RGB666_PACKED: return VID_DST_FORMAT_RGB666; 788 case MIPI_DSI_FMT_RGB565: return VID_DST_FORMAT_RGB565; 789 default: return VID_DST_FORMAT_RGB888; 790 } 791 } 792 793 static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( 794 const enum mipi_dsi_pixel_format mipi_fmt) 795 { 796 switch (mipi_fmt) { 797 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; 798 case MIPI_DSI_FMT_RGB666_PACKED: 799 case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; 800 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 801 default: return CMD_DST_FORMAT_RGB888; 802 } 803 } 804 805 static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, 806 struct msm_dsi_phy_shared_timings *phy_shared_timings, struct msm_dsi_phy *phy) 807 { 808 u32 flags = msm_host->mode_flags; 809 enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; 810 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 811 u32 data = 0, lane_ctrl = 0; 812 813 if (!enable) { 814 dsi_write(msm_host, REG_DSI_CTRL, 0); 815 return; 816 } 817 818 if (flags & MIPI_DSI_MODE_VIDEO) { 819 if (flags & MIPI_DSI_MODE_VIDEO_HSE) 820 data |= DSI_VID_CFG0_PULSE_MODE_HSA_HE; 821 if (flags & MIPI_DSI_MODE_VIDEO_NO_HFP) 822 data |= DSI_VID_CFG0_HFP_POWER_STOP; 823 if (flags & MIPI_DSI_MODE_VIDEO_NO_HBP) 824 data |= DSI_VID_CFG0_HBP_POWER_STOP; 825 if (flags & MIPI_DSI_MODE_VIDEO_NO_HSA) 826 data |= DSI_VID_CFG0_HSA_POWER_STOP; 827 /* Always set low power stop mode for BLLP 828 * to let command engine send packets 829 */ 830 data |= DSI_VID_CFG0_EOF_BLLP_POWER_STOP | 831 DSI_VID_CFG0_BLLP_POWER_STOP; 832 data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags)); 833 data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt)); 834 data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel); 835 dsi_write(msm_host, REG_DSI_VID_CFG0, data); 836 837 /* Do not swap RGB colors */ 838 data = DSI_VID_CFG1_RGB_SWAP(SWAP_RGB); 839 dsi_write(msm_host, REG_DSI_VID_CFG1, 0); 840 } else { 841 /* Do not swap RGB colors */ 842 data = DSI_CMD_CFG0_RGB_SWAP(SWAP_RGB); 843 data |= DSI_CMD_CFG0_DST_FORMAT(dsi_get_cmd_fmt(mipi_fmt)); 844 dsi_write(msm_host, REG_DSI_CMD_CFG0, data); 845 846 data = DSI_CMD_CFG1_WR_MEM_START(MIPI_DCS_WRITE_MEMORY_START) | 847 DSI_CMD_CFG1_WR_MEM_CONTINUE( 848 MIPI_DCS_WRITE_MEMORY_CONTINUE); 849 /* Always insert DCS command */ 850 data |= DSI_CMD_CFG1_INSERT_DCS_COMMAND; 851 dsi_write(msm_host, REG_DSI_CMD_CFG1, data); 852 } 853 854 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, 855 DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER | 856 DSI_CMD_DMA_CTRL_LOW_POWER); 857 858 data = 0; 859 /* Always assume dedicated TE pin */ 860 data |= DSI_TRIG_CTRL_TE; 861 data |= DSI_TRIG_CTRL_MDP_TRIGGER(TRIGGER_NONE); 862 data |= DSI_TRIG_CTRL_DMA_TRIGGER(TRIGGER_SW); 863 data |= DSI_TRIG_CTRL_STREAM(msm_host->channel); 864 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && 865 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_2)) 866 data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; 867 dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); 868 869 data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) | 870 DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre); 871 dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); 872 873 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && 874 (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) && 875 phy_shared_timings->clk_pre_inc_by_2) 876 dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND, 877 DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); 878 879 data = 0; 880 if (!(flags & MIPI_DSI_MODE_NO_EOT_PACKET)) 881 data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; 882 dsi_write(msm_host, REG_DSI_EOT_PACKET_CTRL, data); 883 884 /* allow only ack-err-status to generate interrupt */ 885 dsi_write(msm_host, REG_DSI_ERR_INT_MASK0, 0x13ff3fe0); 886 887 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); 888 889 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); 890 891 data = DSI_CTRL_CLK_EN; 892 893 DBG("lane number=%d", msm_host->lanes); 894 data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0); 895 896 dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL, 897 DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap)); 898 899 if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) { 900 lane_ctrl = dsi_read(msm_host, REG_DSI_LANE_CTRL); 901 902 if (msm_dsi_phy_set_continuous_clock(phy, enable)) 903 lane_ctrl &= ~DSI_LANE_CTRL_HS_REQ_SEL_PHY; 904 905 dsi_write(msm_host, REG_DSI_LANE_CTRL, 906 lane_ctrl | DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST); 907 } 908 909 data |= DSI_CTRL_ENABLE; 910 911 dsi_write(msm_host, REG_DSI_CTRL, data); 912 913 if (msm_host->cphy_mode) 914 dsi_write(msm_host, REG_DSI_CPHY_MODE_CTRL, BIT(0)); 915 } 916 917 static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mode, u32 hdisplay) 918 { 919 struct msm_display_dsc_config *dsc = msm_host->dsc; 920 u32 reg, intf_width, reg_ctrl, reg_ctrl2; 921 u32 slice_per_intf, total_bytes_per_intf; 922 u32 pkt_per_line; 923 u32 bytes_in_slice; 924 u32 eol_byte_num; 925 926 /* first calculate dsc parameters and then program 927 * compress mode registers 928 */ 929 intf_width = hdisplay; 930 slice_per_intf = DIV_ROUND_UP(intf_width, dsc->drm->slice_width); 931 932 /* If slice_per_pkt is greater than slice_per_intf 933 * then default to 1. This can happen during partial 934 * update. 935 */ 936 if (slice_per_intf > dsc->drm->slice_count) 937 dsc->drm->slice_count = 1; 938 939 slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->drm->slice_width); 940 bytes_in_slice = DIV_ROUND_UP(dsc->drm->slice_width * dsc->drm->bits_per_pixel, 8); 941 942 dsc->drm->slice_chunk_size = bytes_in_slice; 943 944 total_bytes_per_intf = bytes_in_slice * slice_per_intf; 945 946 eol_byte_num = total_bytes_per_intf % 3; 947 pkt_per_line = slice_per_intf / dsc->drm->slice_count; 948 949 if (is_cmd_mode) /* packet data type */ 950 reg = DSI_COMMAND_COMPRESSION_MODE_CTRL_STREAM0_DATATYPE(MIPI_DSI_DCS_LONG_WRITE); 951 else 952 reg = DSI_VIDEO_COMPRESSION_MODE_CTRL_DATATYPE(MIPI_DSI_COMPRESSED_PIXEL_STREAM); 953 954 /* DSI_VIDEO_COMPRESSION_MODE & DSI_COMMAND_COMPRESSION_MODE 955 * registers have similar offsets, so for below common code use 956 * DSI_VIDEO_COMPRESSION_MODE_XXXX for setting bits 957 */ 958 reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1); 959 reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num); 960 reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN; 961 962 if (is_cmd_mode) { 963 reg_ctrl = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL); 964 reg_ctrl2 = dsi_read(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2); 965 966 reg_ctrl &= ~0xffff; 967 reg_ctrl |= reg; 968 969 reg_ctrl2 &= ~DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH__MASK; 970 reg_ctrl2 |= DSI_COMMAND_COMPRESSION_MODE_CTRL2_STREAM0_SLICE_WIDTH(bytes_in_slice); 971 972 dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl); 973 dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2); 974 } else { 975 dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg); 976 } 977 } 978 979 static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi) 980 { 981 struct drm_display_mode *mode = msm_host->mode; 982 u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */ 983 u32 h_total = mode->htotal; 984 u32 v_total = mode->vtotal; 985 u32 hs_end = mode->hsync_end - mode->hsync_start; 986 u32 vs_end = mode->vsync_end - mode->vsync_start; 987 u32 ha_start = h_total - mode->hsync_start; 988 u32 ha_end = ha_start + mode->hdisplay; 989 u32 va_start = v_total - mode->vsync_start; 990 u32 va_end = va_start + mode->vdisplay; 991 u32 hdisplay = mode->hdisplay; 992 u32 wc; 993 994 DBG(""); 995 996 /* 997 * For bonded DSI mode, the current DRM mode has 998 * the complete width of the panel. Since, the complete 999 * panel is driven by two DSI controllers, the horizontal 1000 * timings have to be split between the two dsi controllers. 1001 * Adjust the DSI host timing values accordingly. 1002 */ 1003 if (is_bonded_dsi) { 1004 h_total /= 2; 1005 hs_end /= 2; 1006 ha_start /= 2; 1007 ha_end /= 2; 1008 hdisplay /= 2; 1009 } 1010 1011 if (msm_host->dsc) { 1012 struct msm_display_dsc_config *dsc = msm_host->dsc; 1013 1014 /* update dsc params with timing params */ 1015 if (!dsc || !mode->hdisplay || !mode->vdisplay) { 1016 pr_err("DSI: invalid input: pic_width: %d pic_height: %d\n", 1017 mode->hdisplay, mode->vdisplay); 1018 return; 1019 } 1020 1021 dsc->drm->pic_width = mode->hdisplay; 1022 dsc->drm->pic_height = mode->vdisplay; 1023 DBG("Mode %dx%d\n", dsc->drm->pic_width, dsc->drm->pic_height); 1024 1025 /* we do the calculations for dsc parameters here so that 1026 * panel can use these parameters 1027 */ 1028 dsi_populate_dsc_params(dsc); 1029 1030 /* Divide the display by 3 but keep back/font porch and 1031 * pulse width same 1032 */ 1033 h_total -= hdisplay; 1034 hdisplay /= 3; 1035 h_total += hdisplay; 1036 ha_end = ha_start + hdisplay; 1037 } 1038 1039 if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) { 1040 if (msm_host->dsc) 1041 dsi_update_dsc_timing(msm_host, false, mode->hdisplay); 1042 1043 dsi_write(msm_host, REG_DSI_ACTIVE_H, 1044 DSI_ACTIVE_H_START(ha_start) | 1045 DSI_ACTIVE_H_END(ha_end)); 1046 dsi_write(msm_host, REG_DSI_ACTIVE_V, 1047 DSI_ACTIVE_V_START(va_start) | 1048 DSI_ACTIVE_V_END(va_end)); 1049 dsi_write(msm_host, REG_DSI_TOTAL, 1050 DSI_TOTAL_H_TOTAL(h_total - 1) | 1051 DSI_TOTAL_V_TOTAL(v_total - 1)); 1052 1053 dsi_write(msm_host, REG_DSI_ACTIVE_HSYNC, 1054 DSI_ACTIVE_HSYNC_START(hs_start) | 1055 DSI_ACTIVE_HSYNC_END(hs_end)); 1056 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_HPOS, 0); 1057 dsi_write(msm_host, REG_DSI_ACTIVE_VSYNC_VPOS, 1058 DSI_ACTIVE_VSYNC_VPOS_START(vs_start) | 1059 DSI_ACTIVE_VSYNC_VPOS_END(vs_end)); 1060 } else { /* command mode */ 1061 if (msm_host->dsc) 1062 dsi_update_dsc_timing(msm_host, true, mode->hdisplay); 1063 1064 /* image data and 1 byte write_memory_start cmd */ 1065 if (!msm_host->dsc) 1066 wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1; 1067 else 1068 wc = mode->hdisplay / 2 + 1; 1069 1070 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL, 1071 DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) | 1072 DSI_CMD_MDP_STREAM0_CTRL_VIRTUAL_CHANNEL( 1073 msm_host->channel) | 1074 DSI_CMD_MDP_STREAM0_CTRL_DATA_TYPE( 1075 MIPI_DSI_DCS_LONG_WRITE)); 1076 1077 dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_TOTAL, 1078 DSI_CMD_MDP_STREAM0_TOTAL_H_TOTAL(hdisplay) | 1079 DSI_CMD_MDP_STREAM0_TOTAL_V_TOTAL(mode->vdisplay)); 1080 } 1081 } 1082 1083 static void dsi_sw_reset(struct msm_dsi_host *msm_host) 1084 { 1085 u32 ctrl; 1086 1087 ctrl = dsi_read(msm_host, REG_DSI_CTRL); 1088 1089 if (ctrl & DSI_CTRL_ENABLE) { 1090 dsi_write(msm_host, REG_DSI_CTRL, ctrl & ~DSI_CTRL_ENABLE); 1091 /* 1092 * dsi controller need to be disabled before 1093 * clocks turned on 1094 */ 1095 wmb(); 1096 } 1097 1098 dsi_write(msm_host, REG_DSI_CLK_CTRL, DSI_CLK_CTRL_ENABLE_CLKS); 1099 wmb(); /* clocks need to be enabled before reset */ 1100 1101 /* dsi controller can only be reset while clocks are running */ 1102 dsi_write(msm_host, REG_DSI_RESET, 1); 1103 msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */ 1104 dsi_write(msm_host, REG_DSI_RESET, 0); 1105 wmb(); /* controller out of reset */ 1106 1107 if (ctrl & DSI_CTRL_ENABLE) { 1108 dsi_write(msm_host, REG_DSI_CTRL, ctrl); 1109 wmb(); /* make sure dsi controller enabled again */ 1110 } 1111 } 1112 1113 static void dsi_op_mode_config(struct msm_dsi_host *msm_host, 1114 bool video_mode, bool enable) 1115 { 1116 u32 dsi_ctrl; 1117 1118 dsi_ctrl = dsi_read(msm_host, REG_DSI_CTRL); 1119 1120 if (!enable) { 1121 dsi_ctrl &= ~(DSI_CTRL_ENABLE | DSI_CTRL_VID_MODE_EN | 1122 DSI_CTRL_CMD_MODE_EN); 1123 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE | 1124 DSI_IRQ_MASK_VIDEO_DONE, 0); 1125 } else { 1126 if (video_mode) { 1127 dsi_ctrl |= DSI_CTRL_VID_MODE_EN; 1128 } else { /* command mode */ 1129 dsi_ctrl |= DSI_CTRL_CMD_MODE_EN; 1130 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_MDP_DONE, 1); 1131 } 1132 dsi_ctrl |= DSI_CTRL_ENABLE; 1133 } 1134 1135 dsi_write(msm_host, REG_DSI_CTRL, dsi_ctrl); 1136 } 1137 1138 static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host) 1139 { 1140 u32 data; 1141 1142 data = dsi_read(msm_host, REG_DSI_CMD_DMA_CTRL); 1143 1144 if (mode == 0) 1145 data &= ~DSI_CMD_DMA_CTRL_LOW_POWER; 1146 else 1147 data |= DSI_CMD_DMA_CTRL_LOW_POWER; 1148 1149 dsi_write(msm_host, REG_DSI_CMD_DMA_CTRL, data); 1150 } 1151 1152 static void dsi_wait4video_done(struct msm_dsi_host *msm_host) 1153 { 1154 u32 ret = 0; 1155 struct device *dev = &msm_host->pdev->dev; 1156 1157 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); 1158 1159 reinit_completion(&msm_host->video_comp); 1160 1161 ret = wait_for_completion_timeout(&msm_host->video_comp, 1162 msecs_to_jiffies(70)); 1163 1164 if (ret == 0) 1165 DRM_DEV_ERROR(dev, "wait for video done timed out\n"); 1166 1167 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); 1168 } 1169 1170 static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host) 1171 { 1172 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1173 return; 1174 1175 if (msm_host->power_on && msm_host->enabled) { 1176 dsi_wait4video_done(msm_host); 1177 /* delay 4 ms to skip BLLP */ 1178 usleep_range(2000, 4000); 1179 } 1180 } 1181 1182 int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size) 1183 { 1184 struct drm_device *dev = msm_host->dev; 1185 struct msm_drm_private *priv = dev->dev_private; 1186 uint64_t iova; 1187 u8 *data; 1188 1189 data = msm_gem_kernel_new(dev, size, MSM_BO_WC, 1190 priv->kms->aspace, 1191 &msm_host->tx_gem_obj, &iova); 1192 1193 if (IS_ERR(data)) { 1194 msm_host->tx_gem_obj = NULL; 1195 return PTR_ERR(data); 1196 } 1197 1198 msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem"); 1199 1200 msm_host->tx_size = msm_host->tx_gem_obj->size; 1201 1202 return 0; 1203 } 1204 1205 int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size) 1206 { 1207 struct drm_device *dev = msm_host->dev; 1208 1209 msm_host->tx_buf = dma_alloc_coherent(dev->dev, size, 1210 &msm_host->tx_buf_paddr, GFP_KERNEL); 1211 if (!msm_host->tx_buf) 1212 return -ENOMEM; 1213 1214 msm_host->tx_size = size; 1215 1216 return 0; 1217 } 1218 1219 static void dsi_tx_buf_free(struct msm_dsi_host *msm_host) 1220 { 1221 struct drm_device *dev = msm_host->dev; 1222 struct msm_drm_private *priv; 1223 1224 /* 1225 * This is possible if we're tearing down before we've had a chance to 1226 * fully initialize. A very real possibility if our probe is deferred, 1227 * in which case we'll hit msm_dsi_host_destroy() without having run 1228 * through the dsi_tx_buf_alloc(). 1229 */ 1230 if (!dev) 1231 return; 1232 1233 priv = dev->dev_private; 1234 if (msm_host->tx_gem_obj) { 1235 msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace); 1236 drm_gem_object_put(msm_host->tx_gem_obj); 1237 msm_host->tx_gem_obj = NULL; 1238 } 1239 1240 if (msm_host->tx_buf) 1241 dma_free_coherent(dev->dev, msm_host->tx_size, msm_host->tx_buf, 1242 msm_host->tx_buf_paddr); 1243 } 1244 1245 void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host) 1246 { 1247 return msm_gem_get_vaddr(msm_host->tx_gem_obj); 1248 } 1249 1250 void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host) 1251 { 1252 return msm_host->tx_buf; 1253 } 1254 1255 void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host) 1256 { 1257 msm_gem_put_vaddr(msm_host->tx_gem_obj); 1258 } 1259 1260 /* 1261 * prepare cmd buffer to be txed 1262 */ 1263 static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host, 1264 const struct mipi_dsi_msg *msg) 1265 { 1266 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1267 struct mipi_dsi_packet packet; 1268 int len; 1269 int ret; 1270 u8 *data; 1271 1272 ret = mipi_dsi_create_packet(&packet, msg); 1273 if (ret) { 1274 pr_err("%s: create packet failed, %d\n", __func__, ret); 1275 return ret; 1276 } 1277 len = (packet.size + 3) & (~0x3); 1278 1279 if (len > msm_host->tx_size) { 1280 pr_err("%s: packet size is too big\n", __func__); 1281 return -EINVAL; 1282 } 1283 1284 data = cfg_hnd->ops->tx_buf_get(msm_host); 1285 if (IS_ERR(data)) { 1286 ret = PTR_ERR(data); 1287 pr_err("%s: get vaddr failed, %d\n", __func__, ret); 1288 return ret; 1289 } 1290 1291 /* MSM specific command format in memory */ 1292 data[0] = packet.header[1]; 1293 data[1] = packet.header[2]; 1294 data[2] = packet.header[0]; 1295 data[3] = BIT(7); /* Last packet */ 1296 if (mipi_dsi_packet_format_is_long(msg->type)) 1297 data[3] |= BIT(6); 1298 if (msg->rx_buf && msg->rx_len) 1299 data[3] |= BIT(5); 1300 1301 /* Long packet */ 1302 if (packet.payload && packet.payload_length) 1303 memcpy(data + 4, packet.payload, packet.payload_length); 1304 1305 /* Append 0xff to the end */ 1306 if (packet.size < len) 1307 memset(data + packet.size, 0xff, len - packet.size); 1308 1309 if (cfg_hnd->ops->tx_buf_put) 1310 cfg_hnd->ops->tx_buf_put(msm_host); 1311 1312 return len; 1313 } 1314 1315 /* 1316 * dsi_short_read1_resp: 1 parameter 1317 */ 1318 static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg) 1319 { 1320 u8 *data = msg->rx_buf; 1321 if (data && (msg->rx_len >= 1)) { 1322 *data = buf[1]; /* strip out dcs type */ 1323 return 1; 1324 } else { 1325 pr_err("%s: read data does not match with rx_buf len %zu\n", 1326 __func__, msg->rx_len); 1327 return -EINVAL; 1328 } 1329 } 1330 1331 /* 1332 * dsi_short_read2_resp: 2 parameter 1333 */ 1334 static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg) 1335 { 1336 u8 *data = msg->rx_buf; 1337 if (data && (msg->rx_len >= 2)) { 1338 data[0] = buf[1]; /* strip out dcs type */ 1339 data[1] = buf[2]; 1340 return 2; 1341 } else { 1342 pr_err("%s: read data does not match with rx_buf len %zu\n", 1343 __func__, msg->rx_len); 1344 return -EINVAL; 1345 } 1346 } 1347 1348 static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg) 1349 { 1350 /* strip out 4 byte dcs header */ 1351 if (msg->rx_buf && msg->rx_len) 1352 memcpy(msg->rx_buf, buf + 4, msg->rx_len); 1353 1354 return msg->rx_len; 1355 } 1356 1357 int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base) 1358 { 1359 struct drm_device *dev = msm_host->dev; 1360 struct msm_drm_private *priv = dev->dev_private; 1361 1362 if (!dma_base) 1363 return -EINVAL; 1364 1365 return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj, 1366 priv->kms->aspace, dma_base); 1367 } 1368 1369 int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base) 1370 { 1371 if (!dma_base) 1372 return -EINVAL; 1373 1374 *dma_base = msm_host->tx_buf_paddr; 1375 return 0; 1376 } 1377 1378 static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) 1379 { 1380 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 1381 int ret; 1382 uint64_t dma_base; 1383 bool triggered; 1384 1385 ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base); 1386 if (ret) { 1387 pr_err("%s: failed to get iova: %d\n", __func__, ret); 1388 return ret; 1389 } 1390 1391 reinit_completion(&msm_host->dma_comp); 1392 1393 dsi_wait4video_eng_busy(msm_host); 1394 1395 triggered = msm_dsi_manager_cmd_xfer_trigger( 1396 msm_host->id, dma_base, len); 1397 if (triggered) { 1398 ret = wait_for_completion_timeout(&msm_host->dma_comp, 1399 msecs_to_jiffies(200)); 1400 DBG("ret=%d", ret); 1401 if (ret == 0) 1402 ret = -ETIMEDOUT; 1403 else 1404 ret = len; 1405 } else 1406 ret = len; 1407 1408 return ret; 1409 } 1410 1411 static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host, 1412 u8 *buf, int rx_byte, int pkt_size) 1413 { 1414 u32 *temp, data; 1415 int i, j = 0, cnt; 1416 u32 read_cnt; 1417 u8 reg[16]; 1418 int repeated_bytes = 0; 1419 int buf_offset = buf - msm_host->rx_buf; 1420 1421 temp = (u32 *)reg; 1422 cnt = (rx_byte + 3) >> 2; 1423 if (cnt > 4) 1424 cnt = 4; /* 4 x 32 bits registers only */ 1425 1426 if (rx_byte == 4) 1427 read_cnt = 4; 1428 else 1429 read_cnt = pkt_size + 6; 1430 1431 /* 1432 * In case of multiple reads from the panel, after the first read, there 1433 * is possibility that there are some bytes in the payload repeating in 1434 * the RDBK_DATA registers. Since we read all the parameters from the 1435 * panel right from the first byte for every pass. We need to skip the 1436 * repeating bytes and then append the new parameters to the rx buffer. 1437 */ 1438 if (read_cnt > 16) { 1439 int bytes_shifted; 1440 /* Any data more than 16 bytes will be shifted out. 1441 * The temp read buffer should already contain these bytes. 1442 * The remaining bytes in read buffer are the repeated bytes. 1443 */ 1444 bytes_shifted = read_cnt - 16; 1445 repeated_bytes = buf_offset - bytes_shifted; 1446 } 1447 1448 for (i = cnt - 1; i >= 0; i--) { 1449 data = dsi_read(msm_host, REG_DSI_RDBK_DATA(i)); 1450 *temp++ = ntohl(data); /* to host byte order */ 1451 DBG("data = 0x%x and ntohl(data) = 0x%x", data, ntohl(data)); 1452 } 1453 1454 for (i = repeated_bytes; i < 16; i++) 1455 buf[j++] = reg[i]; 1456 1457 return j; 1458 } 1459 1460 static int dsi_cmds2buf_tx(struct msm_dsi_host *msm_host, 1461 const struct mipi_dsi_msg *msg) 1462 { 1463 int len, ret; 1464 int bllp_len = msm_host->mode->hdisplay * 1465 dsi_get_bpp(msm_host->format) / 8; 1466 1467 len = dsi_cmd_dma_add(msm_host, msg); 1468 if (len < 0) { 1469 pr_err("%s: failed to add cmd type = 0x%x\n", 1470 __func__, msg->type); 1471 return len; 1472 } 1473 1474 /* for video mode, do not send cmds more than 1475 * one pixel line, since it only transmit it 1476 * during BLLP. 1477 */ 1478 /* TODO: if the command is sent in LP mode, the bit rate is only 1479 * half of esc clk rate. In this case, if the video is already 1480 * actively streaming, we need to check more carefully if the 1481 * command can be fit into one BLLP. 1482 */ 1483 if ((msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) && (len > bllp_len)) { 1484 pr_err("%s: cmd cannot fit into BLLP period, len=%d\n", 1485 __func__, len); 1486 return -EINVAL; 1487 } 1488 1489 ret = dsi_cmd_dma_tx(msm_host, len); 1490 if (ret < 0) { 1491 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, len=%d, ret=%d\n", 1492 __func__, msg->type, (*(u8 *)(msg->tx_buf)), len, ret); 1493 return ret; 1494 } else if (ret < len) { 1495 pr_err("%s: cmd dma tx failed, type=0x%x, data0=0x%x, ret=%d len=%d\n", 1496 __func__, msg->type, (*(u8 *)(msg->tx_buf)), ret, len); 1497 return -EIO; 1498 } 1499 1500 return len; 1501 } 1502 1503 static void dsi_hpd_worker(struct work_struct *work) 1504 { 1505 struct msm_dsi_host *msm_host = 1506 container_of(work, struct msm_dsi_host, hpd_work); 1507 1508 drm_helper_hpd_irq_event(msm_host->dev); 1509 } 1510 1511 static void dsi_err_worker(struct work_struct *work) 1512 { 1513 struct msm_dsi_host *msm_host = 1514 container_of(work, struct msm_dsi_host, err_work); 1515 u32 status = msm_host->err_work_state; 1516 1517 pr_err_ratelimited("%s: status=%x\n", __func__, status); 1518 if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW) 1519 dsi_sw_reset(msm_host); 1520 1521 /* It is safe to clear here because error irq is disabled. */ 1522 msm_host->err_work_state = 0; 1523 1524 /* enable dsi error interrupt */ 1525 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 1); 1526 } 1527 1528 static void dsi_ack_err_status(struct msm_dsi_host *msm_host) 1529 { 1530 u32 status; 1531 1532 status = dsi_read(msm_host, REG_DSI_ACK_ERR_STATUS); 1533 1534 if (status) { 1535 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, status); 1536 /* Writing of an extra 0 needed to clear error bits */ 1537 dsi_write(msm_host, REG_DSI_ACK_ERR_STATUS, 0); 1538 msm_host->err_work_state |= DSI_ERR_STATE_ACK; 1539 } 1540 } 1541 1542 static void dsi_timeout_status(struct msm_dsi_host *msm_host) 1543 { 1544 u32 status; 1545 1546 status = dsi_read(msm_host, REG_DSI_TIMEOUT_STATUS); 1547 1548 if (status) { 1549 dsi_write(msm_host, REG_DSI_TIMEOUT_STATUS, status); 1550 msm_host->err_work_state |= DSI_ERR_STATE_TIMEOUT; 1551 } 1552 } 1553 1554 static void dsi_dln0_phy_err(struct msm_dsi_host *msm_host) 1555 { 1556 u32 status; 1557 1558 status = dsi_read(msm_host, REG_DSI_DLN0_PHY_ERR); 1559 1560 if (status & (DSI_DLN0_PHY_ERR_DLN0_ERR_ESC | 1561 DSI_DLN0_PHY_ERR_DLN0_ERR_SYNC_ESC | 1562 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTROL | 1563 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP0 | 1564 DSI_DLN0_PHY_ERR_DLN0_ERR_CONTENTION_LP1)) { 1565 dsi_write(msm_host, REG_DSI_DLN0_PHY_ERR, status); 1566 msm_host->err_work_state |= DSI_ERR_STATE_DLN0_PHY; 1567 } 1568 } 1569 1570 static void dsi_fifo_status(struct msm_dsi_host *msm_host) 1571 { 1572 u32 status; 1573 1574 status = dsi_read(msm_host, REG_DSI_FIFO_STATUS); 1575 1576 /* fifo underflow, overflow */ 1577 if (status) { 1578 dsi_write(msm_host, REG_DSI_FIFO_STATUS, status); 1579 msm_host->err_work_state |= DSI_ERR_STATE_FIFO; 1580 if (status & DSI_FIFO_STATUS_CMD_MDP_FIFO_UNDERFLOW) 1581 msm_host->err_work_state |= 1582 DSI_ERR_STATE_MDP_FIFO_UNDERFLOW; 1583 } 1584 } 1585 1586 static void dsi_status(struct msm_dsi_host *msm_host) 1587 { 1588 u32 status; 1589 1590 status = dsi_read(msm_host, REG_DSI_STATUS0); 1591 1592 if (status & DSI_STATUS0_INTERLEAVE_OP_CONTENTION) { 1593 dsi_write(msm_host, REG_DSI_STATUS0, status); 1594 msm_host->err_work_state |= 1595 DSI_ERR_STATE_INTERLEAVE_OP_CONTENTION; 1596 } 1597 } 1598 1599 static void dsi_clk_status(struct msm_dsi_host *msm_host) 1600 { 1601 u32 status; 1602 1603 status = dsi_read(msm_host, REG_DSI_CLK_STATUS); 1604 1605 if (status & DSI_CLK_STATUS_PLL_UNLOCKED) { 1606 dsi_write(msm_host, REG_DSI_CLK_STATUS, status); 1607 msm_host->err_work_state |= DSI_ERR_STATE_PLL_UNLOCKED; 1608 } 1609 } 1610 1611 static void dsi_error(struct msm_dsi_host *msm_host) 1612 { 1613 /* disable dsi error interrupt */ 1614 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_ERROR, 0); 1615 1616 dsi_clk_status(msm_host); 1617 dsi_fifo_status(msm_host); 1618 dsi_ack_err_status(msm_host); 1619 dsi_timeout_status(msm_host); 1620 dsi_status(msm_host); 1621 dsi_dln0_phy_err(msm_host); 1622 1623 queue_work(msm_host->workqueue, &msm_host->err_work); 1624 } 1625 1626 static irqreturn_t dsi_host_irq(int irq, void *ptr) 1627 { 1628 struct msm_dsi_host *msm_host = ptr; 1629 u32 isr; 1630 unsigned long flags; 1631 1632 if (!msm_host->ctrl_base) 1633 return IRQ_HANDLED; 1634 1635 spin_lock_irqsave(&msm_host->intr_lock, flags); 1636 isr = dsi_read(msm_host, REG_DSI_INTR_CTRL); 1637 dsi_write(msm_host, REG_DSI_INTR_CTRL, isr); 1638 spin_unlock_irqrestore(&msm_host->intr_lock, flags); 1639 1640 DBG("isr=0x%x, id=%d", isr, msm_host->id); 1641 1642 if (isr & DSI_IRQ_ERROR) 1643 dsi_error(msm_host); 1644 1645 if (isr & DSI_IRQ_VIDEO_DONE) 1646 complete(&msm_host->video_comp); 1647 1648 if (isr & DSI_IRQ_CMD_DMA_DONE) 1649 complete(&msm_host->dma_comp); 1650 1651 return IRQ_HANDLED; 1652 } 1653 1654 static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host, 1655 struct device *panel_device) 1656 { 1657 msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device, 1658 "disp-enable", 1659 GPIOD_OUT_LOW); 1660 if (IS_ERR(msm_host->disp_en_gpio)) { 1661 DBG("cannot get disp-enable-gpios %ld", 1662 PTR_ERR(msm_host->disp_en_gpio)); 1663 return PTR_ERR(msm_host->disp_en_gpio); 1664 } 1665 1666 msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te", 1667 GPIOD_IN); 1668 if (IS_ERR(msm_host->te_gpio)) { 1669 DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio)); 1670 return PTR_ERR(msm_host->te_gpio); 1671 } 1672 1673 return 0; 1674 } 1675 1676 static int dsi_host_attach(struct mipi_dsi_host *host, 1677 struct mipi_dsi_device *dsi) 1678 { 1679 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1680 int ret; 1681 1682 if (dsi->lanes > msm_host->num_data_lanes) 1683 return -EINVAL; 1684 1685 msm_host->channel = dsi->channel; 1686 msm_host->lanes = dsi->lanes; 1687 msm_host->format = dsi->format; 1688 msm_host->mode_flags = dsi->mode_flags; 1689 1690 /* Some gpios defined in panel DT need to be controlled by host */ 1691 ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); 1692 if (ret) 1693 return ret; 1694 1695 ret = dsi_dev_attach(msm_host->pdev); 1696 if (ret) 1697 return ret; 1698 1699 DBG("id=%d", msm_host->id); 1700 if (msm_host->dev) 1701 queue_work(msm_host->workqueue, &msm_host->hpd_work); 1702 1703 return 0; 1704 } 1705 1706 static int dsi_host_detach(struct mipi_dsi_host *host, 1707 struct mipi_dsi_device *dsi) 1708 { 1709 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1710 1711 dsi_dev_detach(msm_host->pdev); 1712 1713 msm_host->device_node = NULL; 1714 1715 DBG("id=%d", msm_host->id); 1716 if (msm_host->dev) 1717 queue_work(msm_host->workqueue, &msm_host->hpd_work); 1718 1719 return 0; 1720 } 1721 1722 static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, 1723 const struct mipi_dsi_msg *msg) 1724 { 1725 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 1726 int ret; 1727 1728 if (!msg || !msm_host->power_on) 1729 return -EINVAL; 1730 1731 mutex_lock(&msm_host->cmd_mutex); 1732 ret = msm_dsi_manager_cmd_xfer(msm_host->id, msg); 1733 mutex_unlock(&msm_host->cmd_mutex); 1734 1735 return ret; 1736 } 1737 1738 static const struct mipi_dsi_host_ops dsi_host_ops = { 1739 .attach = dsi_host_attach, 1740 .detach = dsi_host_detach, 1741 .transfer = dsi_host_transfer, 1742 }; 1743 1744 /* 1745 * List of supported physical to logical lane mappings. 1746 * For example, the 2nd entry represents the following mapping: 1747 * 1748 * "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3; 1749 */ 1750 static const int supported_data_lane_swaps[][4] = { 1751 { 0, 1, 2, 3 }, 1752 { 3, 0, 1, 2 }, 1753 { 2, 3, 0, 1 }, 1754 { 1, 2, 3, 0 }, 1755 { 0, 3, 2, 1 }, 1756 { 1, 0, 3, 2 }, 1757 { 2, 1, 0, 3 }, 1758 { 3, 2, 1, 0 }, 1759 }; 1760 1761 static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, 1762 struct device_node *ep) 1763 { 1764 struct device *dev = &msm_host->pdev->dev; 1765 struct property *prop; 1766 u32 lane_map[4]; 1767 int ret, i, len, num_lanes; 1768 1769 prop = of_find_property(ep, "data-lanes", &len); 1770 if (!prop) { 1771 DRM_DEV_DEBUG(dev, 1772 "failed to find data lane mapping, using default\n"); 1773 /* Set the number of date lanes to 4 by default. */ 1774 msm_host->num_data_lanes = 4; 1775 return 0; 1776 } 1777 1778 num_lanes = drm_of_get_data_lanes_count(ep, 1, 4); 1779 if (num_lanes < 0) { 1780 DRM_DEV_ERROR(dev, "bad number of data lanes\n"); 1781 return num_lanes; 1782 } 1783 1784 msm_host->num_data_lanes = num_lanes; 1785 1786 ret = of_property_read_u32_array(ep, "data-lanes", lane_map, 1787 num_lanes); 1788 if (ret) { 1789 DRM_DEV_ERROR(dev, "failed to read lane data\n"); 1790 return ret; 1791 } 1792 1793 /* 1794 * compare DT specified physical-logical lane mappings with the ones 1795 * supported by hardware 1796 */ 1797 for (i = 0; i < ARRAY_SIZE(supported_data_lane_swaps); i++) { 1798 const int *swap = supported_data_lane_swaps[i]; 1799 int j; 1800 1801 /* 1802 * the data-lanes array we get from DT has a logical->physical 1803 * mapping. The "data lane swap" register field represents 1804 * supported configurations in a physical->logical mapping. 1805 * Translate the DT mapping to what we understand and find a 1806 * configuration that works. 1807 */ 1808 for (j = 0; j < num_lanes; j++) { 1809 if (lane_map[j] < 0 || lane_map[j] > 3) 1810 DRM_DEV_ERROR(dev, "bad physical lane entry %u\n", 1811 lane_map[j]); 1812 1813 if (swap[lane_map[j]] != j) 1814 break; 1815 } 1816 1817 if (j == num_lanes) { 1818 msm_host->dlane_swap = i; 1819 return 0; 1820 } 1821 } 1822 1823 return -EINVAL; 1824 } 1825 1826 static u32 dsi_dsc_rc_buf_thresh[DSC_NUM_BUF_RANGES - 1] = { 1827 0x0e, 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 1828 0x69, 0x70, 0x77, 0x79, 0x7b, 0x7d, 0x7e 1829 }; 1830 1831 /* only 8bpc, 8bpp added */ 1832 static char min_qp[DSC_NUM_BUF_RANGES] = { 1833 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 13 1834 }; 1835 1836 static char max_qp[DSC_NUM_BUF_RANGES] = { 1837 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 11, 12, 13, 13, 15 1838 }; 1839 1840 static char bpg_offset[DSC_NUM_BUF_RANGES] = { 1841 2, 0, 0, -2, -4, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 1842 }; 1843 1844 static int dsi_populate_dsc_params(struct msm_display_dsc_config *dsc) 1845 { 1846 int mux_words_size; 1847 int groups_per_line, groups_total; 1848 int min_rate_buffer_size; 1849 int hrd_delay; 1850 int pre_num_extra_mux_bits, num_extra_mux_bits; 1851 int slice_bits; 1852 int target_bpp_x16; 1853 int data; 1854 int final_value, final_scale; 1855 int i; 1856 1857 dsc->drm->rc_model_size = 8192; 1858 dsc->drm->first_line_bpg_offset = 12; 1859 dsc->drm->rc_edge_factor = 6; 1860 dsc->drm->rc_tgt_offset_high = 3; 1861 dsc->drm->rc_tgt_offset_low = 3; 1862 dsc->drm->simple_422 = 0; 1863 dsc->drm->convert_rgb = 1; 1864 dsc->drm->vbr_enable = 0; 1865 1866 /* handle only bpp = bpc = 8 */ 1867 for (i = 0; i < DSC_NUM_BUF_RANGES - 1 ; i++) 1868 dsc->drm->rc_buf_thresh[i] = dsi_dsc_rc_buf_thresh[i]; 1869 1870 for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { 1871 dsc->drm->rc_range_params[i].range_min_qp = min_qp[i]; 1872 dsc->drm->rc_range_params[i].range_max_qp = max_qp[i]; 1873 dsc->drm->rc_range_params[i].range_bpg_offset = bpg_offset[i]; 1874 } 1875 1876 dsc->drm->initial_offset = 6144; /* Not bpp 12 */ 1877 if (dsc->drm->bits_per_pixel != 8) 1878 dsc->drm->initial_offset = 2048; /* bpp = 12 */ 1879 1880 mux_words_size = 48; /* bpc == 8/10 */ 1881 if (dsc->drm->bits_per_component == 12) 1882 mux_words_size = 64; 1883 1884 dsc->drm->initial_xmit_delay = 512; 1885 dsc->drm->initial_scale_value = 32; 1886 dsc->drm->first_line_bpg_offset = 12; 1887 dsc->drm->line_buf_depth = dsc->drm->bits_per_component + 1; 1888 1889 /* bpc 8 */ 1890 dsc->drm->flatness_min_qp = 3; 1891 dsc->drm->flatness_max_qp = 12; 1892 dsc->drm->rc_quant_incr_limit0 = 11; 1893 dsc->drm->rc_quant_incr_limit1 = 11; 1894 dsc->drm->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC; 1895 1896 /* FIXME: need to call drm_dsc_compute_rc_parameters() so that rest of 1897 * params are calculated 1898 */ 1899 groups_per_line = DIV_ROUND_UP(dsc->drm->slice_width, 3); 1900 dsc->drm->slice_chunk_size = dsc->drm->slice_width * dsc->drm->bits_per_pixel / 8; 1901 if ((dsc->drm->slice_width * dsc->drm->bits_per_pixel) % 8) 1902 dsc->drm->slice_chunk_size++; 1903 1904 /* rbs-min */ 1905 min_rate_buffer_size = dsc->drm->rc_model_size - dsc->drm->initial_offset + 1906 dsc->drm->initial_xmit_delay * dsc->drm->bits_per_pixel + 1907 groups_per_line * dsc->drm->first_line_bpg_offset; 1908 1909 hrd_delay = DIV_ROUND_UP(min_rate_buffer_size, dsc->drm->bits_per_pixel); 1910 1911 dsc->drm->initial_dec_delay = hrd_delay - dsc->drm->initial_xmit_delay; 1912 1913 dsc->drm->initial_scale_value = 8 * dsc->drm->rc_model_size / 1914 (dsc->drm->rc_model_size - dsc->drm->initial_offset); 1915 1916 slice_bits = 8 * dsc->drm->slice_chunk_size * dsc->drm->slice_height; 1917 1918 groups_total = groups_per_line * dsc->drm->slice_height; 1919 1920 data = dsc->drm->first_line_bpg_offset * 2048; 1921 1922 dsc->drm->nfl_bpg_offset = DIV_ROUND_UP(data, (dsc->drm->slice_height - 1)); 1923 1924 pre_num_extra_mux_bits = 3 * (mux_words_size + (4 * dsc->drm->bits_per_component + 4) - 2); 1925 1926 num_extra_mux_bits = pre_num_extra_mux_bits - (mux_words_size - 1927 ((slice_bits - pre_num_extra_mux_bits) % mux_words_size)); 1928 1929 data = 2048 * (dsc->drm->rc_model_size - dsc->drm->initial_offset + num_extra_mux_bits); 1930 dsc->drm->slice_bpg_offset = DIV_ROUND_UP(data, groups_total); 1931 1932 /* bpp * 16 + 0.5 */ 1933 data = dsc->drm->bits_per_pixel * 16; 1934 data *= 2; 1935 data++; 1936 data /= 2; 1937 target_bpp_x16 = data; 1938 1939 data = (dsc->drm->initial_xmit_delay * target_bpp_x16) / 16; 1940 final_value = dsc->drm->rc_model_size - data + num_extra_mux_bits; 1941 dsc->drm->final_offset = final_value; 1942 1943 final_scale = 8 * dsc->drm->rc_model_size / (dsc->drm->rc_model_size - final_value); 1944 1945 data = (final_scale - 9) * (dsc->drm->nfl_bpg_offset + dsc->drm->slice_bpg_offset); 1946 dsc->drm->scale_increment_interval = (2048 * dsc->drm->final_offset) / data; 1947 1948 dsc->drm->scale_decrement_interval = groups_per_line / (dsc->drm->initial_scale_value - 8); 1949 1950 return 0; 1951 } 1952 1953 static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) 1954 { 1955 struct device *dev = &msm_host->pdev->dev; 1956 struct device_node *np = dev->of_node; 1957 struct device_node *endpoint, *device_node; 1958 int ret = 0; 1959 1960 /* 1961 * Get the endpoint of the output port of the DSI host. In our case, 1962 * this is mapped to port number with reg = 1. Don't return an error if 1963 * the remote endpoint isn't defined. It's possible that there is 1964 * nothing connected to the dsi output. 1965 */ 1966 endpoint = of_graph_get_endpoint_by_regs(np, 1, -1); 1967 if (!endpoint) { 1968 DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__); 1969 return 0; 1970 } 1971 1972 ret = dsi_host_parse_lane_data(msm_host, endpoint); 1973 if (ret) { 1974 DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n", 1975 __func__, ret); 1976 ret = -EINVAL; 1977 goto err; 1978 } 1979 1980 /* Get panel node from the output port's endpoint data */ 1981 device_node = of_graph_get_remote_node(np, 1, 0); 1982 if (!device_node) { 1983 DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__); 1984 ret = -ENODEV; 1985 goto err; 1986 } 1987 1988 msm_host->device_node = device_node; 1989 1990 if (of_property_read_bool(np, "syscon-sfpb")) { 1991 msm_host->sfpb = syscon_regmap_lookup_by_phandle(np, 1992 "syscon-sfpb"); 1993 if (IS_ERR(msm_host->sfpb)) { 1994 DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n", 1995 __func__); 1996 ret = PTR_ERR(msm_host->sfpb); 1997 } 1998 } 1999 2000 of_node_put(device_node); 2001 2002 err: 2003 of_node_put(endpoint); 2004 2005 return ret; 2006 } 2007 2008 static int dsi_host_get_id(struct msm_dsi_host *msm_host) 2009 { 2010 struct platform_device *pdev = msm_host->pdev; 2011 const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg; 2012 struct resource *res; 2013 int i; 2014 2015 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl"); 2016 if (!res) 2017 return -EINVAL; 2018 2019 for (i = 0; i < cfg->num_dsi; i++) { 2020 if (cfg->io_start[i] == res->start) 2021 return i; 2022 } 2023 2024 return -EINVAL; 2025 } 2026 2027 int msm_dsi_host_init(struct msm_dsi *msm_dsi) 2028 { 2029 struct msm_dsi_host *msm_host = NULL; 2030 struct platform_device *pdev = msm_dsi->pdev; 2031 int ret; 2032 2033 msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); 2034 if (!msm_host) { 2035 ret = -ENOMEM; 2036 goto fail; 2037 } 2038 2039 msm_host->pdev = pdev; 2040 msm_dsi->host = &msm_host->base; 2041 2042 ret = dsi_host_parse_dt(msm_host); 2043 if (ret) { 2044 pr_err("%s: failed to parse dt\n", __func__); 2045 goto fail; 2046 } 2047 2048 msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size); 2049 if (IS_ERR(msm_host->ctrl_base)) { 2050 pr_err("%s: unable to map Dsi ctrl base\n", __func__); 2051 ret = PTR_ERR(msm_host->ctrl_base); 2052 goto fail; 2053 } 2054 2055 pm_runtime_enable(&pdev->dev); 2056 2057 msm_host->cfg_hnd = dsi_get_config(msm_host); 2058 if (!msm_host->cfg_hnd) { 2059 ret = -EINVAL; 2060 pr_err("%s: get config failed\n", __func__); 2061 goto fail; 2062 } 2063 2064 msm_host->id = dsi_host_get_id(msm_host); 2065 if (msm_host->id < 0) { 2066 ret = msm_host->id; 2067 pr_err("%s: unable to identify DSI host index\n", __func__); 2068 goto fail; 2069 } 2070 2071 /* fixup base address by io offset */ 2072 msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset; 2073 2074 ret = dsi_regulator_init(msm_host); 2075 if (ret) { 2076 pr_err("%s: regulator init failed\n", __func__); 2077 goto fail; 2078 } 2079 2080 ret = dsi_clk_init(msm_host); 2081 if (ret) { 2082 pr_err("%s: unable to initialize dsi clks\n", __func__); 2083 goto fail; 2084 } 2085 2086 msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL); 2087 if (!msm_host->rx_buf) { 2088 ret = -ENOMEM; 2089 pr_err("%s: alloc rx temp buf failed\n", __func__); 2090 goto fail; 2091 } 2092 2093 ret = devm_pm_opp_set_clkname(&pdev->dev, "byte"); 2094 if (ret) 2095 return ret; 2096 /* OPP table is optional */ 2097 ret = devm_pm_opp_of_add_table(&pdev->dev); 2098 if (ret && ret != -ENODEV) { 2099 dev_err(&pdev->dev, "invalid OPP table in device tree\n"); 2100 return ret; 2101 } 2102 2103 msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); 2104 if (msm_host->irq < 0) { 2105 ret = msm_host->irq; 2106 dev_err(&pdev->dev, "failed to get irq: %d\n", ret); 2107 return ret; 2108 } 2109 2110 /* do not autoenable, will be enabled later */ 2111 ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq, 2112 IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, 2113 "dsi_isr", msm_host); 2114 if (ret < 0) { 2115 dev_err(&pdev->dev, "failed to request IRQ%u: %d\n", 2116 msm_host->irq, ret); 2117 return ret; 2118 } 2119 2120 init_completion(&msm_host->dma_comp); 2121 init_completion(&msm_host->video_comp); 2122 mutex_init(&msm_host->dev_mutex); 2123 mutex_init(&msm_host->cmd_mutex); 2124 spin_lock_init(&msm_host->intr_lock); 2125 2126 /* setup workqueue */ 2127 msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); 2128 INIT_WORK(&msm_host->err_work, dsi_err_worker); 2129 INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); 2130 2131 msm_dsi->id = msm_host->id; 2132 2133 DBG("Dsi Host %d initialized", msm_host->id); 2134 return 0; 2135 2136 fail: 2137 return ret; 2138 } 2139 2140 void msm_dsi_host_destroy(struct mipi_dsi_host *host) 2141 { 2142 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2143 2144 DBG(""); 2145 dsi_tx_buf_free(msm_host); 2146 if (msm_host->workqueue) { 2147 destroy_workqueue(msm_host->workqueue); 2148 msm_host->workqueue = NULL; 2149 } 2150 2151 mutex_destroy(&msm_host->cmd_mutex); 2152 mutex_destroy(&msm_host->dev_mutex); 2153 2154 pm_runtime_disable(&msm_host->pdev->dev); 2155 } 2156 2157 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, 2158 struct drm_device *dev) 2159 { 2160 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2161 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2162 struct drm_panel *panel; 2163 int ret; 2164 2165 msm_host->dev = dev; 2166 panel = msm_dsi_host_get_panel(&msm_host->base); 2167 2168 if (!IS_ERR(panel) && panel->dsc) { 2169 struct msm_display_dsc_config *dsc = msm_host->dsc; 2170 2171 if (!dsc) { 2172 dsc = devm_kzalloc(&msm_host->pdev->dev, sizeof(*dsc), GFP_KERNEL); 2173 if (!dsc) 2174 return -ENOMEM; 2175 dsc->drm = panel->dsc; 2176 msm_host->dsc = dsc; 2177 } 2178 } 2179 2180 ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K); 2181 if (ret) { 2182 pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret); 2183 return ret; 2184 } 2185 2186 return 0; 2187 } 2188 2189 int msm_dsi_host_register(struct mipi_dsi_host *host) 2190 { 2191 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2192 int ret; 2193 2194 /* Register mipi dsi host */ 2195 if (!msm_host->registered) { 2196 host->dev = &msm_host->pdev->dev; 2197 host->ops = &dsi_host_ops; 2198 ret = mipi_dsi_host_register(host); 2199 if (ret) 2200 return ret; 2201 2202 msm_host->registered = true; 2203 } 2204 2205 return 0; 2206 } 2207 2208 void msm_dsi_host_unregister(struct mipi_dsi_host *host) 2209 { 2210 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2211 2212 if (msm_host->registered) { 2213 mipi_dsi_host_unregister(host); 2214 host->dev = NULL; 2215 host->ops = NULL; 2216 msm_host->registered = false; 2217 } 2218 } 2219 2220 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host, 2221 const struct mipi_dsi_msg *msg) 2222 { 2223 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2224 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2225 2226 /* TODO: make sure dsi_cmd_mdp is idle. 2227 * Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME 2228 * to ask H/W to wait until cmd mdp is idle. S/W wait is not needed. 2229 * How to handle the old versions? Wait for mdp cmd done? 2230 */ 2231 2232 /* 2233 * mdss interrupt is generated in mdp core clock domain 2234 * mdp clock need to be enabled to receive dsi interrupt 2235 */ 2236 pm_runtime_get_sync(&msm_host->pdev->dev); 2237 cfg_hnd->ops->link_clk_set_rate(msm_host); 2238 cfg_hnd->ops->link_clk_enable(msm_host); 2239 2240 /* TODO: vote for bus bandwidth */ 2241 2242 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) 2243 dsi_set_tx_power_mode(0, msm_host); 2244 2245 msm_host->dma_cmd_ctrl_restore = dsi_read(msm_host, REG_DSI_CTRL); 2246 dsi_write(msm_host, REG_DSI_CTRL, 2247 msm_host->dma_cmd_ctrl_restore | 2248 DSI_CTRL_CMD_MODE_EN | 2249 DSI_CTRL_ENABLE); 2250 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 1); 2251 2252 return 0; 2253 } 2254 2255 void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host, 2256 const struct mipi_dsi_msg *msg) 2257 { 2258 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2259 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2260 2261 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0); 2262 dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore); 2263 2264 if (!(msg->flags & MIPI_DSI_MSG_USE_LPM)) 2265 dsi_set_tx_power_mode(1, msm_host); 2266 2267 /* TODO: unvote for bus bandwidth */ 2268 2269 cfg_hnd->ops->link_clk_disable(msm_host); 2270 pm_runtime_put(&msm_host->pdev->dev); 2271 } 2272 2273 int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host, 2274 const struct mipi_dsi_msg *msg) 2275 { 2276 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2277 2278 return dsi_cmds2buf_tx(msm_host, msg); 2279 } 2280 2281 int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host, 2282 const struct mipi_dsi_msg *msg) 2283 { 2284 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2285 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2286 int data_byte, rx_byte, dlen, end; 2287 int short_response, diff, pkt_size, ret = 0; 2288 char cmd; 2289 int rlen = msg->rx_len; 2290 u8 *buf; 2291 2292 if (rlen <= 2) { 2293 short_response = 1; 2294 pkt_size = rlen; 2295 rx_byte = 4; 2296 } else { 2297 short_response = 0; 2298 data_byte = 10; /* first read */ 2299 if (rlen < data_byte) 2300 pkt_size = rlen; 2301 else 2302 pkt_size = data_byte; 2303 rx_byte = data_byte + 6; /* 4 header + 2 crc */ 2304 } 2305 2306 buf = msm_host->rx_buf; 2307 end = 0; 2308 while (!end) { 2309 u8 tx[2] = {pkt_size & 0xff, pkt_size >> 8}; 2310 struct mipi_dsi_msg max_pkt_size_msg = { 2311 .channel = msg->channel, 2312 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, 2313 .tx_len = 2, 2314 .tx_buf = tx, 2315 }; 2316 2317 DBG("rlen=%d pkt_size=%d rx_byte=%d", 2318 rlen, pkt_size, rx_byte); 2319 2320 ret = dsi_cmds2buf_tx(msm_host, &max_pkt_size_msg); 2321 if (ret < 2) { 2322 pr_err("%s: Set max pkt size failed, %d\n", 2323 __func__, ret); 2324 return -EINVAL; 2325 } 2326 2327 if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && 2328 (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_1)) { 2329 /* Clear the RDBK_DATA registers */ 2330 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 2331 DSI_RDBK_DATA_CTRL_CLR); 2332 wmb(); /* make sure the RDBK registers are cleared */ 2333 dsi_write(msm_host, REG_DSI_RDBK_DATA_CTRL, 0); 2334 wmb(); /* release cleared status before transfer */ 2335 } 2336 2337 ret = dsi_cmds2buf_tx(msm_host, msg); 2338 if (ret < 0) { 2339 pr_err("%s: Read cmd Tx failed, %d\n", __func__, ret); 2340 return ret; 2341 } else if (ret < msg->tx_len) { 2342 pr_err("%s: Read cmd Tx failed, too short: %d\n", __func__, ret); 2343 return -ECOMM; 2344 } 2345 2346 /* 2347 * once cmd_dma_done interrupt received, 2348 * return data from client is ready and stored 2349 * at RDBK_DATA register already 2350 * since rx fifo is 16 bytes, dcs header is kept at first loop, 2351 * after that dcs header lost during shift into registers 2352 */ 2353 dlen = dsi_cmd_dma_rx(msm_host, buf, rx_byte, pkt_size); 2354 2355 if (dlen <= 0) 2356 return 0; 2357 2358 if (short_response) 2359 break; 2360 2361 if (rlen <= data_byte) { 2362 diff = data_byte - rlen; 2363 end = 1; 2364 } else { 2365 diff = 0; 2366 rlen -= data_byte; 2367 } 2368 2369 if (!end) { 2370 dlen -= 2; /* 2 crc */ 2371 dlen -= diff; 2372 buf += dlen; /* next start position */ 2373 data_byte = 14; /* NOT first read */ 2374 if (rlen < data_byte) 2375 pkt_size += rlen; 2376 else 2377 pkt_size += data_byte; 2378 DBG("buf=%p dlen=%d diff=%d", buf, dlen, diff); 2379 } 2380 } 2381 2382 /* 2383 * For single Long read, if the requested rlen < 10, 2384 * we need to shift the start position of rx 2385 * data buffer to skip the bytes which are not 2386 * updated. 2387 */ 2388 if (pkt_size < 10 && !short_response) 2389 buf = msm_host->rx_buf + (10 - rlen); 2390 else 2391 buf = msm_host->rx_buf; 2392 2393 cmd = buf[0]; 2394 switch (cmd) { 2395 case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT: 2396 pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__); 2397 ret = 0; 2398 break; 2399 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE: 2400 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE: 2401 ret = dsi_short_read1_resp(buf, msg); 2402 break; 2403 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: 2404 case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE: 2405 ret = dsi_short_read2_resp(buf, msg); 2406 break; 2407 case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE: 2408 case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE: 2409 ret = dsi_long_read_resp(buf, msg); 2410 break; 2411 default: 2412 pr_warn("%s:Invalid response cmd\n", __func__); 2413 ret = 0; 2414 } 2415 2416 return ret; 2417 } 2418 2419 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, 2420 u32 len) 2421 { 2422 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2423 2424 dsi_write(msm_host, REG_DSI_DMA_BASE, dma_base); 2425 dsi_write(msm_host, REG_DSI_DMA_LEN, len); 2426 dsi_write(msm_host, REG_DSI_TRIG_DMA, 1); 2427 2428 /* Make sure trigger happens */ 2429 wmb(); 2430 } 2431 2432 void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host, 2433 struct msm_dsi_phy *src_phy) 2434 { 2435 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2436 2437 msm_host->cphy_mode = src_phy->cphy_mode; 2438 } 2439 2440 void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) 2441 { 2442 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2443 2444 DBG(""); 2445 dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); 2446 /* Make sure fully reset */ 2447 wmb(); 2448 udelay(1000); 2449 dsi_write(msm_host, REG_DSI_PHY_RESET, 0); 2450 udelay(100); 2451 } 2452 2453 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, 2454 struct msm_dsi_phy_clk_request *clk_req, 2455 bool is_bonded_dsi) 2456 { 2457 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2458 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2459 int ret; 2460 2461 ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_bonded_dsi); 2462 if (ret) { 2463 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); 2464 return; 2465 } 2466 2467 /* CPHY transmits 16 bits over 7 clock cycles 2468 * "byte_clk" is in units of 16-bits (see dsi_calc_pclk), 2469 * so multiply by 7 to get the "bitclk rate" 2470 */ 2471 if (msm_host->cphy_mode) 2472 clk_req->bitclk_rate = msm_host->byte_clk_rate * 7; 2473 else 2474 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2475 clk_req->escclk_rate = msm_host->esc_clk_rate; 2476 } 2477 2478 void msm_dsi_host_enable_irq(struct mipi_dsi_host *host) 2479 { 2480 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2481 2482 enable_irq(msm_host->irq); 2483 } 2484 2485 void msm_dsi_host_disable_irq(struct mipi_dsi_host *host) 2486 { 2487 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2488 2489 disable_irq(msm_host->irq); 2490 } 2491 2492 int msm_dsi_host_enable(struct mipi_dsi_host *host) 2493 { 2494 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2495 2496 dsi_op_mode_config(msm_host, 2497 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), true); 2498 2499 /* TODO: clock should be turned off for command mode, 2500 * and only turned on before MDP START. 2501 * This part of code should be enabled once mdp driver support it. 2502 */ 2503 /* if (msm_panel->mode == MSM_DSI_CMD_MODE) { 2504 * dsi_link_clk_disable(msm_host); 2505 * pm_runtime_put(&msm_host->pdev->dev); 2506 * } 2507 */ 2508 msm_host->enabled = true; 2509 return 0; 2510 } 2511 2512 int msm_dsi_host_disable(struct mipi_dsi_host *host) 2513 { 2514 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2515 2516 msm_host->enabled = false; 2517 dsi_op_mode_config(msm_host, 2518 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 2519 2520 /* Since we have disabled INTF, the video engine won't stop so that 2521 * the cmd engine will be blocked. 2522 * Reset to disable video engine so that we can send off cmd. 2523 */ 2524 dsi_sw_reset(msm_host); 2525 2526 return 0; 2527 } 2528 2529 static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) 2530 { 2531 enum sfpb_ahb_arb_master_port_en en; 2532 2533 if (!msm_host->sfpb) 2534 return; 2535 2536 en = enable ? SFPB_MASTER_PORT_ENABLE : SFPB_MASTER_PORT_DISABLE; 2537 2538 regmap_update_bits(msm_host->sfpb, REG_SFPB_GPREG, 2539 SFPB_GPREG_MASTER_PORT_EN__MASK, 2540 SFPB_GPREG_MASTER_PORT_EN(en)); 2541 } 2542 2543 int msm_dsi_host_power_on(struct mipi_dsi_host *host, 2544 struct msm_dsi_phy_shared_timings *phy_shared_timings, 2545 bool is_bonded_dsi, struct msm_dsi_phy *phy) 2546 { 2547 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2548 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2549 int ret = 0; 2550 2551 mutex_lock(&msm_host->dev_mutex); 2552 if (msm_host->power_on) { 2553 DBG("dsi host already on"); 2554 goto unlock_ret; 2555 } 2556 2557 msm_dsi_sfpb_config(msm_host, true); 2558 2559 ret = dsi_host_regulator_enable(msm_host); 2560 if (ret) { 2561 pr_err("%s:Failed to enable vregs.ret=%d\n", 2562 __func__, ret); 2563 goto unlock_ret; 2564 } 2565 2566 pm_runtime_get_sync(&msm_host->pdev->dev); 2567 ret = cfg_hnd->ops->link_clk_set_rate(msm_host); 2568 if (!ret) 2569 ret = cfg_hnd->ops->link_clk_enable(msm_host); 2570 if (ret) { 2571 pr_err("%s: failed to enable link clocks. ret=%d\n", 2572 __func__, ret); 2573 goto fail_disable_reg; 2574 } 2575 2576 ret = pinctrl_pm_select_default_state(&msm_host->pdev->dev); 2577 if (ret) { 2578 pr_err("%s: failed to set pinctrl default state, %d\n", 2579 __func__, ret); 2580 goto fail_disable_clk; 2581 } 2582 2583 dsi_timing_setup(msm_host, is_bonded_dsi); 2584 dsi_sw_reset(msm_host); 2585 dsi_ctrl_config(msm_host, true, phy_shared_timings, phy); 2586 2587 if (msm_host->disp_en_gpio) 2588 gpiod_set_value(msm_host->disp_en_gpio, 1); 2589 2590 msm_host->power_on = true; 2591 mutex_unlock(&msm_host->dev_mutex); 2592 2593 return 0; 2594 2595 fail_disable_clk: 2596 cfg_hnd->ops->link_clk_disable(msm_host); 2597 pm_runtime_put(&msm_host->pdev->dev); 2598 fail_disable_reg: 2599 dsi_host_regulator_disable(msm_host); 2600 unlock_ret: 2601 mutex_unlock(&msm_host->dev_mutex); 2602 return ret; 2603 } 2604 2605 int msm_dsi_host_power_off(struct mipi_dsi_host *host) 2606 { 2607 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2608 const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd; 2609 2610 mutex_lock(&msm_host->dev_mutex); 2611 if (!msm_host->power_on) { 2612 DBG("dsi host already off"); 2613 goto unlock_ret; 2614 } 2615 2616 dsi_ctrl_config(msm_host, false, NULL, NULL); 2617 2618 if (msm_host->disp_en_gpio) 2619 gpiod_set_value(msm_host->disp_en_gpio, 0); 2620 2621 pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); 2622 2623 cfg_hnd->ops->link_clk_disable(msm_host); 2624 pm_runtime_put(&msm_host->pdev->dev); 2625 2626 dsi_host_regulator_disable(msm_host); 2627 2628 msm_dsi_sfpb_config(msm_host, false); 2629 2630 DBG("-"); 2631 2632 msm_host->power_on = false; 2633 2634 unlock_ret: 2635 mutex_unlock(&msm_host->dev_mutex); 2636 return 0; 2637 } 2638 2639 int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, 2640 const struct drm_display_mode *mode) 2641 { 2642 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2643 2644 if (msm_host->mode) { 2645 drm_mode_destroy(msm_host->dev, msm_host->mode); 2646 msm_host->mode = NULL; 2647 } 2648 2649 msm_host->mode = drm_mode_duplicate(msm_host->dev, mode); 2650 if (!msm_host->mode) { 2651 pr_err("%s: cannot duplicate mode\n", __func__); 2652 return -ENOMEM; 2653 } 2654 2655 return 0; 2656 } 2657 2658 enum drm_mode_status msm_dsi_host_check_dsc(struct mipi_dsi_host *host, 2659 const struct drm_display_mode *mode) 2660 { 2661 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2662 struct msm_display_dsc_config *dsc = msm_host->dsc; 2663 int pic_width = mode->hdisplay; 2664 int pic_height = mode->vdisplay; 2665 2666 if (!msm_host->dsc) 2667 return MODE_OK; 2668 2669 if (pic_width % dsc->drm->slice_width) { 2670 pr_err("DSI: pic_width %d has to be multiple of slice %d\n", 2671 pic_width, dsc->drm->slice_width); 2672 return MODE_H_ILLEGAL; 2673 } 2674 2675 if (pic_height % dsc->drm->slice_height) { 2676 pr_err("DSI: pic_height %d has to be multiple of slice %d\n", 2677 pic_height, dsc->drm->slice_height); 2678 return MODE_V_ILLEGAL; 2679 } 2680 2681 return MODE_OK; 2682 } 2683 2684 struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host) 2685 { 2686 return of_drm_find_panel(to_msm_dsi_host(host)->device_node); 2687 } 2688 2689 unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host) 2690 { 2691 return to_msm_dsi_host(host)->mode_flags; 2692 } 2693 2694 struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host) 2695 { 2696 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2697 2698 return of_drm_find_bridge(msm_host->device_node); 2699 } 2700 2701 void msm_dsi_host_snapshot(struct msm_disp_state *disp_state, struct mipi_dsi_host *host) 2702 { 2703 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2704 2705 pm_runtime_get_sync(&msm_host->pdev->dev); 2706 2707 msm_disp_snapshot_add_block(disp_state, msm_host->ctrl_size, 2708 msm_host->ctrl_base, "dsi%d_ctrl", msm_host->id); 2709 2710 pm_runtime_put_sync(&msm_host->pdev->dev); 2711 } 2712 2713 static void msm_dsi_host_video_test_pattern_setup(struct msm_dsi_host *msm_host) 2714 { 2715 u32 reg; 2716 2717 reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); 2718 2719 dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, 0xff); 2720 /* draw checkered rectangle pattern */ 2721 dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL, 2722 DSI_TPG_MAIN_CONTROL_CHECKERED_RECTANGLE_PATTERN); 2723 /* use 24-bit RGB test pttern */ 2724 dsi_write(msm_host, REG_DSI_TPG_VIDEO_CONFIG, 2725 DSI_TPG_VIDEO_CONFIG_BPP(VIDEO_CONFIG_24BPP) | 2726 DSI_TPG_VIDEO_CONFIG_RGB); 2727 2728 reg |= DSI_TEST_PATTERN_GEN_CTRL_VIDEO_PATTERN_SEL(VID_MDSS_GENERAL_PATTERN); 2729 dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); 2730 2731 DBG("Video test pattern setup done\n"); 2732 } 2733 2734 static void msm_dsi_host_cmd_test_pattern_setup(struct msm_dsi_host *msm_host) 2735 { 2736 u32 reg; 2737 2738 reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); 2739 2740 /* initial value for test pattern */ 2741 dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0, 0xff); 2742 2743 reg |= DSI_TEST_PATTERN_GEN_CTRL_CMD_MDP_STREAM0_PATTERN_SEL(CMD_MDP_MDSS_GENERAL_PATTERN); 2744 2745 dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, reg); 2746 /* draw checkered rectangle pattern */ 2747 dsi_write(msm_host, REG_DSI_TPG_MAIN_CONTROL2, 2748 DSI_TPG_MAIN_CONTROL2_CMD_MDP0_CHECKERED_RECTANGLE_PATTERN); 2749 2750 DBG("Cmd test pattern setup done\n"); 2751 } 2752 2753 void msm_dsi_host_test_pattern_en(struct mipi_dsi_host *host) 2754 { 2755 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2756 bool is_video_mode = !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO); 2757 u32 reg; 2758 2759 if (is_video_mode) 2760 msm_dsi_host_video_test_pattern_setup(msm_host); 2761 else 2762 msm_dsi_host_cmd_test_pattern_setup(msm_host); 2763 2764 reg = dsi_read(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL); 2765 /* enable the test pattern generator */ 2766 dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CTRL, (reg | DSI_TEST_PATTERN_GEN_CTRL_EN)); 2767 2768 /* for command mode need to trigger one frame from tpg */ 2769 if (!is_video_mode) 2770 dsi_write(msm_host, REG_DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 2771 DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER_SW_TRIGGER); 2772 } 2773 2774 struct msm_display_dsc_config *msm_dsi_host_get_dsc_config(struct mipi_dsi_host *host) 2775 { 2776 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2777 2778 return msm_host->dsc; 2779 } 2780