// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include <video/mipi_display.h>
#include <video/videomode.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "mtk_drm_ddp_comp.h"

#define DSI_START		0x00

#define DSI_INTEN		0x08

#define DSI_INTSTA		0x0c
#define LPRX_RD_RDY_INT_FLAG		BIT(0)
#define CMD_DONE_INT_FLAG		BIT(1)
#define TE_RDY_INT_FLAG			BIT(2)
#define VM_DONE_INT_FLAG		BIT(3)
#define EXT_TE_RDY_INT_FLAG		BIT(4)
#define DSI_BUSY			BIT(31)

#define DSI_CON_CTRL		0x10
#define DSI_RESET			BIT(0)
#define DSI_EN				BIT(1)

#define DSI_MODE_CTRL		0x14
#define MODE				(3)
#define CMD_MODE			0
#define SYNC_PULSE_MODE			1
#define SYNC_EVENT_MODE			2
#define BURST_MODE			3
#define FRM_MODE			BIT(16)
#define MIX_MODE			BIT(17)

#define DSI_TXRX_CTRL		0x18
#define VC_NUM				BIT(1)
#define LANE_NUM			(0xf << 2)
#define DIS_EOT				BIT(6)
#define NULL_EN				BIT(7)
#define TE_FREERUN			BIT(8)
#define EXT_TE_EN			BIT(9)
#define EXT_TE_EDGE			BIT(10)
#define MAX_RTN_SIZE			(0xf << 12)
#define HSTX_CKLP_EN			BIT(16)

#define DSI_PSCTRL		0x1c
#define DSI_PS_WC			0x3fff
#define DSI_PS_SEL			(3 << 16)
#define PACKED_PS_16BIT_RGB565		(0 << 16)
#define LOOSELY_PS_18BIT_RGB666		(1 << 16)
#define PACKED_PS_18BIT_RGB666		(2 << 16)
#define PACKED_PS_24BIT_RGB888		(3 << 16)

#define DSI_VSA_NL		0x20
#define DSI_VBP_NL		0x24
#define DSI_VFP_NL		0x28
#define DSI_VACT_NL		0x2C
#define DSI_HSA_WC		0x50
#define DSI_HBP_WC		0x54
#define DSI_HFP_WC		0x58

#define DSI_CMDQ_SIZE		0x60
#define CMDQ_SIZE			0x3f

#define DSI_HSTX_CKL_WC		0x64

#define DSI_RX_DATA0		0x74
#define DSI_RX_DATA1		0x78
#define DSI_RX_DATA2		0x7c
#define DSI_RX_DATA3		0x80

#define DSI_RACK		0x84
#define RACK				BIT(0)

#define DSI_PHY_LCCON		0x104
#define LC_HS_TX_EN			BIT(0)
#define LC_ULPM_EN			BIT(1)
#define LC_WAKEUP_EN			BIT(2)

#define DSI_PHY_LD0CON		0x108
#define LD0_HS_TX_EN			BIT(0)
#define LD0_ULPM_EN			BIT(1)
#define LD0_WAKEUP_EN			BIT(2)

#define DSI_PHY_TIMECON0	0x110
#define LPX				(0xff << 0)
#define HS_PREP				(0xff << 8)
#define HS_ZERO				(0xff << 16)
#define HS_TRAIL			(0xff << 24)

#define DSI_PHY_TIMECON1	0x114
#define TA_GO				(0xff << 0)
#define TA_SURE				(0xff << 8)
#define TA_GET				(0xff << 16)
#define DA_HS_EXIT			(0xff << 24)

#define DSI_PHY_TIMECON2	0x118
#define CONT_DET			(0xff << 0)
#define CLK_ZERO			(0xff << 16)
#define CLK_TRAIL			(0xff << 24)

#define DSI_PHY_TIMECON3	0x11c
#define CLK_HS_PREP			(0xff << 0)
#define CLK_HS_POST			(0xff << 8)
#define CLK_HS_EXIT			(0xff << 16)

#define DSI_VM_CMD_CON		0x130
#define VM_CMD_EN			BIT(0)
#define TS_VFP_EN			BIT(5)

#define DSI_CMDQ0		0x180
#define CONFIG				(0xff << 0)
#define SHORT_PACKET			0
#define LONG_PACKET			2
#define BTA				BIT(2)
#define DATA_ID				(0xff << 8)
#define DATA_0				(0xff << 16)
#define DATA_1				(0xff << 24)
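
/*
 * Fixed D-PHY timing parameters (presumably in byte-clock cycles). They are
 * programmed into the DSI_PHY_TIMECON registers by mtk_dsi_phy_timconfig()
 * and reused in mtk_dsi_poweron() to estimate the per-line HS overhead when
 * computing the data rate.
 */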
#define T_LPX		5
#define T_HS_PREP	6
#define T_HS_TRAIL	8
#define T_HS_EXIT	7
#define T_HS_ZERO	10

#define NS_TO_CYCLE(n, c)    ((n) / (c) + (((n) % (c)) ? 1 : 0))

#define MTK_DSI_HOST_IS_READ(type) \
	((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
	 (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
	 (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
	 (type == MIPI_DSI_DCS_READ))

struct phy;

struct mtk_dsi {
	struct mtk_ddp_comp ddp_comp;
	struct device *dev;
	struct mipi_dsi_host host;
	struct drm_encoder encoder;
	struct drm_connector conn;
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	struct phy *phy;

	void __iomem *regs;

	struct clk *engine_clk;
	struct clk *digital_clk;
	struct clk *hs_clk;

	u32 data_rate;

	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
	struct videomode vm;
	int refcount;
	bool enabled;
	u32 irq_data;
	wait_queue_head_t irq_wait_queue;
};

static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
{
	return container_of(e, struct mtk_dsi, encoder);
}

static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
{
	return container_of(c, struct mtk_dsi, conn);
}

static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
	return container_of(h, struct mtk_dsi, host);
}

static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
	u32 temp = readl(dsi->regs + offset);

	writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}

static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
	u32 timcon0, timcon1, timcon2, timcon3;
	u32 ui, cycle_time;

	ui = 1000 / dsi->data_rate + 0x01;
	cycle_time = 8000 / dsi->data_rate + 0x01;

	timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
	timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
		  T_HS_EXIT << 24;
	timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
		  (NS_TO_CYCLE(0x150, cycle_time) << 16);
	timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
		  NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;

	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
	writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
	writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}

static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}

static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}

static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}

static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}

static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}
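
/*
 * Leaving ULPS: clear the ULPM request, then pulse the WAKEUP bit,
 * presumably so the lane driver sequences back to the stop state (same
 * pattern as the clock lane above).
 */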
static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}

static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
	u32 tmp_reg1;

	tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
	return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}

static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
	if (enter && !mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
	else if (!enter && mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}

static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
	u32 vid_mode = CMD_MODE;

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
			vid_mode = BURST_MODE;
		else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
			vid_mode = SYNC_PULSE_MODE;
		else
			vid_mode = SYNC_EVENT_MODE;
	}

	writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}

static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
	struct videomode *vm = &dsi->vm;
	u32 dsi_buf_bpp, ps_wc;
	u32 ps_bpp_mode;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_buf_bpp = 2;
	else
		dsi_buf_bpp = 3;

	ps_wc = vm->hactive * dsi_buf_bpp;
	ps_bpp_mode = ps_wc;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
		break;
	case MIPI_DSI_FMT_RGB666:
		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB565:
		ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
		break;
	}

	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
	writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
	writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}

static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
	u32 tmp_reg;

	switch (dsi->lanes) {
	case 1:
		tmp_reg = 1 << 2;
		break;
	case 2:
		tmp_reg = 3 << 2;
		break;
	case 3:
		tmp_reg = 7 << 2;
		break;
	case 4:
		tmp_reg = 0xf << 2;
		break;
	default:
		tmp_reg = 0xf << 2;
		break;
	}

	tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
	tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;

	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}

static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
	u32 dsi_tmp_buf_bpp;
	u32 tmp_reg;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666:
		tmp_reg = LOOSELY_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		tmp_reg = PACKED_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB565:
		tmp_reg = PACKED_PS_16BIT_RGB565;
		dsi_tmp_buf_bpp = 2;
		break;
	default:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	}

	tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
	writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
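
/*
 * Video mode horizontal timings are programmed as word counts (bytes on the
 * DSI link), i.e. pixels * bytes-per-pixel. The small constants subtracted
 * below presumably account for the blanking packet header/CRC overhead on
 * each line.
 */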
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
	u32 horizontal_sync_active_byte;
	u32 horizontal_backporch_byte;
	u32 horizontal_frontporch_byte;
	u32 dsi_tmp_buf_bpp;

	struct videomode *vm = &dsi->vm;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_tmp_buf_bpp = 2;
	else
		dsi_tmp_buf_bpp = 3;

	writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
	writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
	writel(vm->vactive, dsi->regs + DSI_VACT_NL);

	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		horizontal_backporch_byte =
			(vm->hback_porch * dsi_tmp_buf_bpp - 10);
	else
		horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
			dsi_tmp_buf_bpp - 10);

	horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);

	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
	writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

	mtk_dsi_ps_control(dsi);
}

static void mtk_dsi_start(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
	writel(1, dsi->regs + DSI_START);
}

static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
}

static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
	writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
	u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	writel(inten, dsi->regs + DSI_INTEN);
}

static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data |= irq_bit;
}

static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data &= ~irq_bit;
}

static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
				     unsigned int timeout)
{
	s32 ret = 0;
	unsigned long jiffies = msecs_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
					       dsi->irq_data & irq_flag,
					       jiffies);
	if (ret == 0) {
		DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}

	return ret;
}

static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
	struct mtk_dsi *dsi = dev_id;
	u32 status, tmp;
	u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	status = readl(dsi->regs + DSI_INTSTA) & flag;

	if (status) {
		do {
			mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
			tmp = readl(dsi->regs + DSI_INTSTA);
		} while (tmp & DSI_BUSY);

		mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
		mtk_dsi_irq_data_set(dsi, status);
		wake_up_interruptible(&dsi->irq_wait_queue);
	}

	return IRQ_HANDLED;
}

static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
	mtk_dsi_irq_data_clear(dsi, irq_flag);
	mtk_dsi_set_cmd_mode(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
		DRM_ERROR("failed to switch cmd mode\n");
		return -ETIME;
	} else {
		return 0;
	}
}
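
/*
 * Power on/off is refcounted: it is reached both through the DDP component
 * start/stop hooks and through the encoder enable/disable path, so only the
 * first mtk_dsi_poweron() and the last mtk_dsi_poweroff() actually touch the
 * clocks, the MIPI D-PHY and the controller.
 */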
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
	struct device *dev = dsi->dev;
	int ret;
	u64 pixel_clock, total_bits;
	u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;

	if (++dsi->refcount != 1)
		return 0;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB565:
		bit_per_pixel = 16;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		bit_per_pixel = 18;
		break;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB888:
	default:
		bit_per_pixel = 24;
		break;
	}

	/**
	 * htotal_time = htotal * byte_per_pixel / num_lanes
	 * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
	 * mipi_ratio = (htotal_time + overhead_time) / htotal_time
	 * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
	 */
	pixel_clock = dsi->vm.pixelclock;
	htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
		 dsi->vm.hsync_len;
	htotal_bits = htotal * bit_per_pixel;

	overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
			  T_HS_EXIT;
	overhead_bits = overhead_cycles * dsi->lanes * 8;
	total_bits = htotal_bits + overhead_bits;

	dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
					  htotal * dsi->lanes);

	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
	if (ret < 0) {
		dev_err(dev, "Failed to set data rate: %d\n", ret);
		goto err_refcount;
	}

	phy_power_on(dsi->phy);

	ret = clk_prepare_enable(dsi->engine_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
		goto err_phy_power_off;
	}

	ret = clk_prepare_enable(dsi->digital_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable digital clock: %d\n", ret);
		goto err_disable_engine_clk;
	}

	mtk_dsi_enable(dsi);
	mtk_dsi_reset_engine(dsi);
	mtk_dsi_phy_timconfig(dsi);

	mtk_dsi_rxtx_control(dsi);
	mtk_dsi_ps_control_vact(dsi);
	mtk_dsi_set_vm_cmd(dsi);
	mtk_dsi_config_vdo_timing(dsi);
	mtk_dsi_set_interrupt_enable(dsi);

	mtk_dsi_clk_ulp_mode_leave(dsi);
	mtk_dsi_lane0_ulp_mode_leave(dsi);
	mtk_dsi_clk_hs_mode(dsi, 0);

	if (dsi->panel) {
		if (drm_panel_prepare(dsi->panel)) {
			DRM_ERROR("failed to prepare the panel\n");
			goto err_disable_digital_clk;
		}
	}

	return 0;
err_disable_digital_clk:
	clk_disable_unprepare(dsi->digital_clk);
err_disable_engine_clk:
	clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
	phy_power_off(dsi->phy);
err_refcount:
	dsi->refcount--;
	return ret;
}

static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
	if (WARN_ON(dsi->refcount == 0))
		return;

	if (--dsi->refcount != 0)
		return;

	/*
	 * mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
	 * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
	 * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
	 * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
	 * after dsi is fully set.
	 */
	mtk_dsi_stop(dsi);

	if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
		if (dsi->panel) {
			if (drm_panel_unprepare(dsi->panel)) {
				DRM_ERROR("failed to unprepare the panel\n");
				return;
			}
		}
	}

	mtk_dsi_reset_engine(dsi);
	mtk_dsi_lane0_ulp_mode_enter(dsi);
	mtk_dsi_clk_ulp_mode_enter(dsi);

	mtk_dsi_disable(dsi);

	clk_disable_unprepare(dsi->engine_clk);
	clk_disable_unprepare(dsi->digital_clk);

	phy_power_off(dsi->phy);
}

static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
	int ret;

	if (dsi->enabled)
		return;

	ret = mtk_dsi_poweron(dsi);
	if (ret < 0) {
		DRM_ERROR("failed to power on dsi\n");
		return;
	}

	mtk_dsi_set_mode(dsi);
	mtk_dsi_clk_hs_mode(dsi, 1);

	mtk_dsi_start(dsi);

	if (dsi->panel) {
		if (drm_panel_enable(dsi->panel)) {
			DRM_ERROR("failed to enable the panel\n");
			goto err_dsi_power_off;
		}
	}

	dsi->enabled = true;

	return;
err_dsi_power_off:
	mtk_dsi_stop(dsi);
	mtk_dsi_poweroff(dsi);
}

static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
	if (!dsi->enabled)
		return;

	if (dsi->panel) {
		if (drm_panel_disable(dsi->panel)) {
			DRM_ERROR("failed to disable the panel\n");
			return;
		}
	}

	mtk_dsi_poweroff(dsi);

	dsi->enabled = false;
}

static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
	.destroy = mtk_dsi_encoder_destroy,
};

static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
				       const struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	return true;
}

static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted)
{
	struct mtk_dsi *dsi = encoder_to_dsi(encoder);

	drm_display_mode_to_videomode(adjusted, &dsi->vm);
}

static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
{
	struct mtk_dsi *dsi = encoder_to_dsi(encoder);

	mtk_output_dsi_disable(dsi);
}

static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
{
	struct mtk_dsi *dsi = encoder_to_dsi(encoder);

	mtk_output_dsi_enable(dsi);
}

static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
{
	struct mtk_dsi *dsi = connector_to_dsi(connector);

	return drm_panel_get_modes(dsi->panel);
}

static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
	.mode_fixup = mtk_dsi_encoder_mode_fixup,
	.mode_set = mtk_dsi_encoder_mode_set,
	.disable = mtk_dsi_encoder_disable,
	.enable = mtk_dsi_encoder_enable,
};

static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static const struct drm_connector_helper_funcs
	mtk_dsi_connector_helper_funcs = {
	.get_modes = mtk_dsi_connector_get_modes,
};
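
/*
 * Connector used for the direct-panel case: when no bridge is attached to
 * the encoder, the driver creates its own DSI connector and exposes the
 * panel's modes through drm_panel_get_modes().
 */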
static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
				 DRM_MODE_CONNECTOR_DSI);
	if (ret) {
		DRM_ERROR("Failed to connector init to drm\n");
		return ret;
	}

	drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);

	dsi->conn.dpms = DRM_MODE_DPMS_OFF;
	drm_connector_attach_encoder(&dsi->conn, &dsi->encoder);

	if (dsi->panel) {
		ret = drm_panel_attach(dsi->panel, &dsi->conn);
		if (ret) {
			DRM_ERROR("Failed to attach panel to drm\n");
			goto err_connector_cleanup;
		}
	}

	return 0;

err_connector_cleanup:
	drm_connector_cleanup(&dsi->conn);
	return ret;
}

static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
			       DRM_MODE_ENCODER_DSI, NULL);
	if (ret) {
		DRM_ERROR("Failed to encoder init to drm\n");
		return ret;
	}
	drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);

	/*
	 * Currently display data paths are statically assigned to a crtc each.
	 * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
	 */
	dsi->encoder.possible_crtcs = 1;

	/* If there's a bridge, attach to it and let it create the connector */
	if (dsi->bridge) {
		ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
		if (ret) {
			DRM_ERROR("Failed to attach bridge to drm\n");
			goto err_encoder_cleanup;
		}
	} else {
		/* Otherwise create our own connector and attach to a panel */
		ret = mtk_dsi_create_connector(drm, dsi);
		if (ret)
			goto err_encoder_cleanup;
	}

	return 0;

err_encoder_cleanup:
	drm_encoder_cleanup(&dsi->encoder);
	return ret;
}

static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
{
	drm_encoder_cleanup(&dsi->encoder);
	/* Skip connector cleanup if creation was delegated to the bridge */
	if (dsi->conn.dev)
		drm_connector_cleanup(&dsi->conn);
	if (dsi->panel)
		drm_panel_detach(dsi->panel);
}

static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
{
	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

	mtk_dsi_poweron(dsi);
}

static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
{
	struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);

	mtk_dsi_poweroff(dsi);
}

static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
	.start = mtk_dsi_ddp_start,
	.stop = mtk_dsi_ddp_stop,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	dsi->lanes = device->lanes;
	dsi->format = device->format;
	dsi->mode_flags = device->mode_flags;

	if (dsi->conn.dev)
		drm_helper_hpd_irq_event(dsi->conn.dev);

	return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	if (dsi->conn.dev)
		drm_helper_hpd_irq_event(dsi->conn.dev);

	return 0;
}

static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
				 4, 2000000);
	if (ret) {
		DRM_WARN("polling dsi wait not busy timeout!\n");

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}
}
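
/*
 * read_data[0] carries the RX packet data type: short read responses return
 * one or two payload bytes directly, while long read responses take their
 * payload length from the following header bytes.
 */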
static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
	switch (type) {
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		return 1;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		return 2;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		return read_data[1] + read_data[2] * 16;
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		DRM_INFO("type is 0x02, try again\n");
		break;
	default:
		DRM_INFO("type(0x%x) not recognized\n", type);
		break;
	}

	return 0;
}

static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
	const char *tx_buf = msg->tx_buf;
	u8 config, cmdq_size, cmdq_off, type = msg->type;
	u32 reg_val, cmdq_mask, i;

	if (MTK_DSI_HOST_IS_READ(type))
		config = BTA;
	else
		config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

	if (msg->tx_len > 2) {
		cmdq_size = 1 + (msg->tx_len + 3) / 4;
		cmdq_off = 4;
		cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
		reg_val = (msg->tx_len << 16) | (type << 8) | config;
	} else {
		cmdq_size = 1;
		cmdq_off = 2;
		cmdq_mask = CONFIG | DATA_ID;
		reg_val = (type << 8) | config;
	}

	for (i = 0; i < msg->tx_len; i++)
		writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);

	mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
	mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}

static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
				     const struct mipi_dsi_msg *msg, u8 flag)
{
	mtk_dsi_wait_for_idle(dsi);
	mtk_dsi_irq_data_clear(dsi, flag);
	mtk_dsi_cmdq(dsi, msg);
	mtk_dsi_start(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
		return -ETIME;
	else
		return 0;
}

static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
				     const struct mipi_dsi_msg *msg)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	u32 recv_cnt, i;
	u8 read_data[16];
	void *src_addr;
	u8 irq_flag = CMD_DONE_INT_FLAG;

	if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
		DRM_ERROR("dsi engine is not command mode\n");
		return -EINVAL;
	}

	if (MTK_DSI_HOST_IS_READ(msg->type))
		irq_flag |= LPRX_RD_RDY_INT_FLAG;

	if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
		return -ETIME;

	if (!MTK_DSI_HOST_IS_READ(msg->type))
		return 0;

	if (!msg->rx_buf) {
		DRM_ERROR("dsi receive buffer size may be NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

	recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

	if (recv_cnt > 2)
		src_addr = &read_data[4];
	else
		src_addr = &read_data[1];

	if (recv_cnt > 10)
		recv_cnt = 10;

	if (recv_cnt > msg->rx_len)
		recv_cnt = msg->rx_len;

	if (recv_cnt)
		memcpy(msg->rx_buf, src_addr, recv_cnt);

	DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
		 recv_cnt, *((u8 *)(msg->tx_buf)));

	return recv_cnt;
}

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
	.attach = mtk_dsi_host_attach,
	.detach = mtk_dsi_host_detach,
	.transfer = mtk_dsi_host_transfer,
};
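
/*
 * Component bind: register the DDP component with the DRM device, register
 * the DSI host so a panel or bridge can attach, then create the encoder and,
 * if no bridge takes over, the connector.
 */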
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
	int ret;
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
	if (ret < 0) {
		dev_err(dev, "Failed to register component %pOF: %d\n",
			dev->of_node, ret);
		return ret;
	}

	ret = mipi_dsi_host_register(&dsi->host);
	if (ret < 0) {
		dev_err(dev, "failed to register DSI host: %d\n", ret);
		goto err_ddp_comp_unregister;
	}

	ret = mtk_dsi_create_conn_enc(drm, dsi);
	if (ret) {
		DRM_ERROR("Encoder create failed with %d\n", ret);
		goto err_unregister;
	}

	return 0;

err_unregister:
	mipi_dsi_host_unregister(&dsi->host);
err_ddp_comp_unregister:
	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
	return ret;
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	mtk_dsi_destroy_conn_enc(dsi);
	mipi_dsi_host_unregister(&dsi->host);
	mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
}

static const struct component_ops mtk_dsi_component_ops = {
	.bind = mtk_dsi_bind,
	.unbind = mtk_dsi_unbind,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
	struct mtk_dsi *dsi;
	struct device *dev = &pdev->dev;
	struct resource *regs;
	int irq_num;
	int comp_id;
	int ret;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->host.ops = &mtk_dsi_ops;
	dsi->host.dev = dev;

	ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
					  &dsi->panel, &dsi->bridge);
	if (ret)
		return ret;

	dsi->engine_clk = devm_clk_get(dev, "engine");
	if (IS_ERR(dsi->engine_clk)) {
		ret = PTR_ERR(dsi->engine_clk);
		dev_err(dev, "Failed to get engine clock: %d\n", ret);
		return ret;
	}

	dsi->digital_clk = devm_clk_get(dev, "digital");
	if (IS_ERR(dsi->digital_clk)) {
		ret = PTR_ERR(dsi->digital_clk);
		dev_err(dev, "Failed to get digital clock: %d\n", ret);
		return ret;
	}

	dsi->hs_clk = devm_clk_get(dev, "hs");
	if (IS_ERR(dsi->hs_clk)) {
		ret = PTR_ERR(dsi->hs_clk);
		dev_err(dev, "Failed to get hs clock: %d\n", ret);
		return ret;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dsi->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(dsi->regs)) {
		ret = PTR_ERR(dsi->regs);
		dev_err(dev, "Failed to ioremap memory: %d\n", ret);
		return ret;
	}

	dsi->phy = devm_phy_get(dev, "dphy");
	if (IS_ERR(dsi->phy)) {
		ret = PTR_ERR(dsi->phy);
		dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
		return ret;
	}

	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
	if (comp_id < 0) {
		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
		return comp_id;
	}

	ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
				&mtk_dsi_funcs);
	if (ret) {
		dev_err(dev, "Failed to initialize component: %d\n", ret);
		return ret;
	}

	irq_num = platform_get_irq(pdev, 0);
	if (irq_num < 0) {
		dev_err(&pdev->dev, "failed to request dsi irq resource\n");
		return -EPROBE_DEFER;
	}

	irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
	ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
			       IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
		return -EPROBE_DEFER;
	}

	init_waitqueue_head(&dsi->irq_wait_queue);

	platform_set_drvdata(pdev, dsi);

	return component_add(&pdev->dev, &mtk_dsi_component_ops);
}

static int mtk_dsi_remove(struct platform_device *pdev)
{
	struct mtk_dsi *dsi = platform_get_drvdata(pdev);

	mtk_output_dsi_disable(dsi);
	component_del(&pdev->dev, &mtk_dsi_component_ops);

	return 0;
}

static const struct of_device_id mtk_dsi_of_match[] = {
	{ .compatible = "mediatek,mt2701-dsi" },
	{ .compatible = "mediatek,mt8173-dsi" },
	{ },
};

struct platform_driver mtk_dsi_driver = {
	.probe = mtk_dsi_probe,
	.remove = mtk_dsi_remove,
	.driver = {
		.name = "mtk-dsi",
		.of_match_table = mtk_dsi_of_match,
	},
};