// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <video/mipi_display.h>
#include <video/videomode.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

#include "mtk_disp_drv.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"

#define DSI_START		0x00

#define DSI_INTEN		0x08

#define DSI_INTSTA		0x0c
#define LPRX_RD_RDY_INT_FLAG		BIT(0)
#define CMD_DONE_INT_FLAG		BIT(1)
#define TE_RDY_INT_FLAG			BIT(2)
#define VM_DONE_INT_FLAG		BIT(3)
#define EXT_TE_RDY_INT_FLAG		BIT(4)
#define DSI_BUSY			BIT(31)

#define DSI_CON_CTRL		0x10
#define DSI_RESET			BIT(0)
#define DSI_EN				BIT(1)
#define DPHY_RESET			BIT(2)

#define DSI_MODE_CTRL		0x14
#define MODE				(3)
#define CMD_MODE			0
#define SYNC_PULSE_MODE			1
#define SYNC_EVENT_MODE			2
#define BURST_MODE			3
#define FRM_MODE			BIT(16)
#define MIX_MODE			BIT(17)

#define DSI_TXRX_CTRL		0x18
#define VC_NUM				BIT(1)
#define LANE_NUM			(0xf << 2)
#define DIS_EOT				BIT(6)
#define NULL_EN				BIT(7)
#define TE_FREERUN			BIT(8)
#define EXT_TE_EN			BIT(9)
#define EXT_TE_EDGE			BIT(10)
#define MAX_RTN_SIZE			(0xf << 12)
#define HSTX_CKLP_EN			BIT(16)

#define DSI_PSCTRL		0x1c
#define DSI_PS_WC			0x3fff
#define DSI_PS_SEL			(3 << 16)
#define PACKED_PS_16BIT_RGB565		(0 << 16)
#define LOOSELY_PS_18BIT_RGB666		(1 << 16)
#define PACKED_PS_18BIT_RGB666		(2 << 16)
#define PACKED_PS_24BIT_RGB888		(3 << 16)

#define DSI_VSA_NL		0x20
#define DSI_VBP_NL		0x24
#define DSI_VFP_NL		0x28
#define DSI_VACT_NL		0x2C
#define DSI_SIZE_CON		0x38
#define DSI_HSA_WC		0x50
#define DSI_HBP_WC		0x54
#define DSI_HFP_WC		0x58

#define DSI_CMDQ_SIZE		0x60
#define CMDQ_SIZE			0x3f

#define DSI_HSTX_CKL_WC		0x64

#define DSI_RX_DATA0		0x74
#define DSI_RX_DATA1		0x78
#define DSI_RX_DATA2		0x7c
#define DSI_RX_DATA3		0x80

#define DSI_RACK		0x84
#define RACK				BIT(0)

#define DSI_PHY_LCCON		0x104
#define LC_HS_TX_EN			BIT(0)
#define LC_ULPM_EN			BIT(1)
#define LC_WAKEUP_EN			BIT(2)

#define DSI_PHY_LD0CON		0x108
#define LD0_HS_TX_EN			BIT(0)
#define LD0_ULPM_EN			BIT(1)
#define LD0_WAKEUP_EN			BIT(2)

#define DSI_PHY_TIMECON0	0x110
#define LPX				(0xff << 0)
#define HS_PREP				(0xff << 8)
#define HS_ZERO				(0xff << 16)
#define HS_TRAIL			(0xff << 24)

#define DSI_PHY_TIMECON1	0x114
#define TA_GO				(0xff << 0)
#define TA_SURE				(0xff << 8)
#define TA_GET				(0xff << 16)
#define DA_HS_EXIT			(0xff << 24)

#define DSI_PHY_TIMECON2	0x118
#define CONT_DET			(0xff << 0)
#define CLK_ZERO			(0xff << 16)
#define CLK_TRAIL			(0xff << 24)

#define DSI_PHY_TIMECON3	0x11c
#define CLK_HS_PREP			(0xff << 0)
#define CLK_HS_POST			(0xff << 8)
#define CLK_HS_EXIT			(0xff << 16)

#define DSI_VM_CMD_CON		0x130
#define VM_CMD_EN			BIT(0)
#define TS_VFP_EN			BIT(5)

#define DSI_SHADOW_DEBUG	0x190U
#define FORCE_COMMIT			BIT(0)
#define BYPASS_SHADOW			BIT(1)

#define CONFIG				(0xff << 0)
#define SHORT_PACKET			0
#define LONG_PACKET			2
#define BTA				BIT(2)
#define DATA_ID				(0xff << 8)
#define DATA_0				(0xff << 16)
#define DATA_1				(0xff << 24)

#define NS_TO_CYCLE(n, c)	((n) / (c) + (((n) % (c)) ? 1 : 0))

#define MTK_DSI_HOST_IS_READ(type) \
	((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
	 (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
	 (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
	 (type == MIPI_DSI_DCS_READ))
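/*
 * D-PHY timing parameters programmed into DSI_PHY_TIMECON0..3.
 * Judging by the arithmetic in mtk_dsi_phy_timconfig(), the values are in
 * units of the lane byte clock (HS bit rate / 8).
 */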
struct mtk_phy_timing {
	u32 lpx;
	u32 da_hs_prepare;
	u32 da_hs_zero;
	u32 da_hs_trail;

	u32 ta_go;
	u32 ta_sure;
	u32 ta_get;
	u32 da_hs_exit;

	u32 clk_hs_zero;
	u32 clk_hs_trail;

	u32 clk_hs_prepare;
	u32 clk_hs_post;
	u32 clk_hs_exit;
};

struct phy;

struct mtk_dsi_driver_data {
	const u32 reg_cmdq_off;
	bool has_shadow_ctl;
	bool has_size_ctl;
};

struct mtk_dsi {
	struct device *dev;
	struct mipi_dsi_host host;
	struct drm_encoder encoder;
	struct drm_bridge bridge;
	struct drm_bridge *next_bridge;
	struct drm_connector *connector;
	struct phy *phy;

	void __iomem *regs;

	struct clk *engine_clk;
	struct clk *digital_clk;
	struct clk *hs_clk;

	u32 data_rate;

	unsigned long mode_flags;
	enum mipi_dsi_pixel_format format;
	unsigned int lanes;
	struct videomode vm;
	struct mtk_phy_timing phy_timing;
	int refcount;
	bool enabled;
	bool lanes_ready;
	u32 irq_data;
	wait_queue_head_t irq_wait_queue;
	const struct mtk_dsi_driver_data *driver_data;
};

static inline struct mtk_dsi *bridge_to_dsi(struct drm_bridge *b)
{
	return container_of(b, struct mtk_dsi, bridge);
}

static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
{
	return container_of(h, struct mtk_dsi, host);
}

static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
{
	u32 temp = readl(dsi->regs + offset);

	writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
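/*
 * Derive the D-PHY timing parameters from the HS data rate (rounded up to
 * whole MHz) and program DSI_PHY_TIMECON0..3. The coefficients approximate
 * the MIPI D-PHY minimum timing requirements. The computed values are kept
 * in dsi->phy_timing and reused by mtk_dsi_config_vdo_timing() to budget
 * the LP<->HS switching overhead.
 */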
static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
	u32 timcon0, timcon1, timcon2, timcon3;
	u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
	struct mtk_phy_timing *timing = &dsi->phy_timing;

	timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
	timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
	timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
			     timing->da_hs_prepare;
	timing->da_hs_trail = timing->da_hs_prepare + 1;

	timing->ta_go = 4 * timing->lpx - 2;
	timing->ta_sure = timing->lpx + 2;
	timing->ta_get = 4 * timing->lpx;
	timing->da_hs_exit = 2 * timing->lpx + 1;

	timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
	timing->clk_hs_post = timing->clk_hs_prepare + 8;
	timing->clk_hs_trail = timing->clk_hs_prepare;
	timing->clk_hs_zero = timing->clk_hs_trail * 4;
	timing->clk_hs_exit = 2 * timing->clk_hs_trail;

	timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
		  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
	timcon1 = timing->ta_go | timing->ta_sure << 8 |
		  timing->ta_get << 16 | timing->da_hs_exit << 24;
	timcon2 = 1 << 8 | timing->clk_hs_zero << 16 |
		  timing->clk_hs_trail << 24;
	timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 |
		  timing->clk_hs_exit << 16;

	writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
	writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
	writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
	writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
}

static void mtk_dsi_enable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
}

static void mtk_dsi_disable(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}

static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}

static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET);
	mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0);
}

static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}

static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}

static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
	mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}

static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
	return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}

static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
	if (enter && !mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
	else if (!enter && mtk_dsi_clk_hs_state(dsi))
		mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}

static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
	u32 vid_mode = CMD_MODE;

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
		if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
			vid_mode = BURST_MODE;
		else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
			vid_mode = SYNC_PULSE_MODE;
		else
			vid_mode = SYNC_EVENT_MODE;
	}

	writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
{
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
	mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
}
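/*
 * Program the pixel-stream settings for the active area: DSI_VACT_NL takes
 * the active line count, DSI_PSCTRL the per-line word count
 * (hactive * bytes per pixel) together with the pixel-stream format, and
 * the same word count is written to DSI_HSTX_CKL_WC.
 */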
static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
	struct videomode *vm = &dsi->vm;
	u32 dsi_buf_bpp, ps_wc;
	u32 ps_bpp_mode;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_buf_bpp = 2;
	else
		dsi_buf_bpp = 3;

	ps_wc = vm->hactive * dsi_buf_bpp;
	ps_bpp_mode = ps_wc;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
		break;
	case MIPI_DSI_FMT_RGB666:
		ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
		break;
	case MIPI_DSI_FMT_RGB565:
		ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
		break;
	}

	writel(vm->vactive, dsi->regs + DSI_VACT_NL);
	writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
	writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}

static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
	u32 tmp_reg;

	switch (dsi->lanes) {
	case 1:
		tmp_reg = 1 << 2;
		break;
	case 2:
		tmp_reg = 3 << 2;
		break;
	case 3:
		tmp_reg = 7 << 2;
		break;
	case 4:
		tmp_reg = 0xf << 2;
		break;
	default:
		tmp_reg = 0xf << 2;
		break;
	}

	if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
		tmp_reg |= HSTX_CKLP_EN;

	if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
		tmp_reg |= DIS_EOT;

	writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}

static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
	u32 dsi_tmp_buf_bpp;
	u32 tmp_reg;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB888:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666:
		tmp_reg = LOOSELY_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		tmp_reg = PACKED_PS_18BIT_RGB666;
		dsi_tmp_buf_bpp = 3;
		break;
	case MIPI_DSI_FMT_RGB565:
		tmp_reg = PACKED_PS_16BIT_RGB565;
		dsi_tmp_buf_bpp = 2;
		break;
	default:
		tmp_reg = PACKED_PS_24BIT_RGB888;
		dsi_tmp_buf_bpp = 3;
		break;
	}

	tmp_reg += dsi->vm.hactive * dsi_tmp_buf_bpp & DSI_PS_WC;
	writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
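/*
 * Program the video-mode timing registers. Vertical parameters are written
 * in lines, horizontal parameters as byte counts (pixels * bytes per pixel
 * minus packet overhead). HFP and HBP are then shrunk proportionally to
 * make room for the per-lane LP<->HS switching cycles, so the resulting
 * line time stays close to the requested mode.
 */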
static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
	u32 horizontal_sync_active_byte;
	u32 horizontal_backporch_byte;
	u32 horizontal_frontporch_byte;
	u32 horizontal_front_back_byte;
	u32 data_phy_cycles_byte;
	u32 dsi_tmp_buf_bpp, data_phy_cycles;
	u32 delta;
	struct mtk_phy_timing *timing = &dsi->phy_timing;

	struct videomode *vm = &dsi->vm;

	if (dsi->format == MIPI_DSI_FMT_RGB565)
		dsi_tmp_buf_bpp = 2;
	else
		dsi_tmp_buf_bpp = 3;

	writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
	writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
	writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
	writel(vm->vactive, dsi->regs + DSI_VACT_NL);

	if (dsi->driver_data->has_size_ctl)
		writel(vm->vactive << 16 | vm->hactive,
		       dsi->regs + DSI_SIZE_CON);

	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);

	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
	else
		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
					    dsi_tmp_buf_bpp - 10;

	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
			  timing->da_hs_zero + timing->da_hs_exit + 3;

	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
	delta += dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET ? 0 : 2;

	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
	data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;

	if (horizontal_front_back_byte > data_phy_cycles_byte) {
		horizontal_frontporch_byte -= data_phy_cycles_byte *
					      horizontal_frontporch_byte /
					      horizontal_front_back_byte;

		horizontal_backporch_byte -= data_phy_cycles_byte *
					     horizontal_backporch_byte /
					     horizontal_front_back_byte;
	} else {
		DRM_WARN("HFP + HBP less than d-phy, FPS will be under 60Hz\n");
	}

	if ((dsi->mode_flags & MIPI_DSI_HS_PKT_END_ALIGNED) &&
	    (dsi->lanes == 4)) {
		horizontal_sync_active_byte =
			roundup(horizontal_sync_active_byte, dsi->lanes) - 2;
		horizontal_frontporch_byte =
			roundup(horizontal_frontporch_byte, dsi->lanes) - 2;
		horizontal_backporch_byte =
			roundup(horizontal_backporch_byte, dsi->lanes) - 2;
		horizontal_backporch_byte -=
			(vm->hactive * dsi_tmp_buf_bpp + 2) % dsi->lanes;
	}

	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
	writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
	writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);

	mtk_dsi_ps_control(dsi);
}

static void mtk_dsi_start(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
	writel(1, dsi->regs + DSI_START);
}

static void mtk_dsi_stop(struct mtk_dsi *dsi)
{
	writel(0, dsi->regs + DSI_START);
}

static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
{
	writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
}

static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
{
	u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	writel(inten, dsi->regs + DSI_INTEN);
}

static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data |= irq_bit;
}

static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
{
	dsi->irq_data &= ~irq_bit;
}

static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
				     unsigned int timeout)
{
	s32 ret = 0;
	unsigned long jiffies = msecs_to_jiffies(timeout);

	ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
					       dsi->irq_data & irq_flag,
					       jiffies);
	if (ret == 0) {
		DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}

	return ret;
}

static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
{
	struct mtk_dsi *dsi = dev_id;
	u32 status, tmp;
	u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;

	status = readl(dsi->regs + DSI_INTSTA) & flag;

	if (status) {
		do {
			mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
			tmp = readl(dsi->regs + DSI_INTSTA);
		} while (tmp & DSI_BUSY);

		mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
		mtk_dsi_irq_data_set(dsi, status);
		wake_up_interruptible(&dsi->irq_wait_queue);
	}

	return IRQ_HANDLED;
}

static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
{
	mtk_dsi_irq_data_clear(dsi, irq_flag);
	mtk_dsi_set_cmd_mode(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
		DRM_ERROR("failed to switch cmd mode\n");
		return -ETIME;
	} else {
		return 0;
	}
}
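/*
 * Reference-counted power-up: the first caller sets the HS clock rate to
 * pixelclock * bits-per-pixel / lanes, powers on the D-PHY, enables the
 * engine and digital clocks and programs the PHY timing, pixel-stream and
 * video timing registers. Later callers only increment the refcount.
 */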
static int mtk_dsi_poweron(struct mtk_dsi *dsi)
{
	struct device *dev = dsi->host.dev;
	int ret;
	u32 bit_per_pixel;

	if (++dsi->refcount != 1)
		return 0;

	switch (dsi->format) {
	case MIPI_DSI_FMT_RGB565:
		bit_per_pixel = 16;
		break;
	case MIPI_DSI_FMT_RGB666_PACKED:
		bit_per_pixel = 18;
		break;
	case MIPI_DSI_FMT_RGB666:
	case MIPI_DSI_FMT_RGB888:
	default:
		bit_per_pixel = 24;
		break;
	}

	dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel,
					  dsi->lanes);

	ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
	if (ret < 0) {
		dev_err(dev, "Failed to set data rate: %d\n", ret);
		goto err_refcount;
	}

	phy_power_on(dsi->phy);

	ret = clk_prepare_enable(dsi->engine_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable engine clock: %d\n", ret);
		goto err_phy_power_off;
	}

	ret = clk_prepare_enable(dsi->digital_clk);
	if (ret < 0) {
		dev_err(dev, "Failed to enable digital clock: %d\n", ret);
		goto err_disable_engine_clk;
	}

	mtk_dsi_enable(dsi);

	if (dsi->driver_data->has_shadow_ctl)
		writel(FORCE_COMMIT | BYPASS_SHADOW,
		       dsi->regs + DSI_SHADOW_DEBUG);

	mtk_dsi_reset_engine(dsi);
	mtk_dsi_phy_timconfig(dsi);

	mtk_dsi_ps_control_vact(dsi);
	mtk_dsi_set_vm_cmd(dsi);
	mtk_dsi_config_vdo_timing(dsi);
	mtk_dsi_set_interrupt_enable(dsi);

	return 0;
err_disable_engine_clk:
	clk_disable_unprepare(dsi->engine_clk);
err_phy_power_off:
	phy_power_off(dsi->phy);
err_refcount:
	dsi->refcount--;
	return ret;
}

static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
	if (WARN_ON(dsi->refcount == 0))
		return;

	if (--dsi->refcount != 0)
		return;

	/*
	 * mtk_dsi_stop() and mtk_dsi_start() are asymmetric: mtk_dsi_stop()
	 * should be called after mtk_drm_crtc_atomic_disable(), which needs
	 * the irq for vblank, and mtk_dsi_stop() will disable the irq.
	 * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
	 * after the dsi is fully set up.
	 */
	mtk_dsi_stop(dsi);

	mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
	mtk_dsi_reset_engine(dsi);
	mtk_dsi_lane0_ulp_mode_enter(dsi);
	mtk_dsi_clk_ulp_mode_enter(dsi);
	/* set the lane number as 0 to pull down mipi */
	writel(0, dsi->regs + DSI_TXRX_CTRL);

	mtk_dsi_disable(dsi);

	clk_disable_unprepare(dsi->engine_clk);
	clk_disable_unprepare(dsi->digital_clk);

	phy_power_off(dsi->phy);

	dsi->lanes_ready = false;
}
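/*
 * One-shot lane bring-up, shared by the enable path and the command
 * transfer path: program the lane count, reset the D-PHY, take the clock
 * and data lanes out of ultra-low-power mode, and give the panel side time
 * to react before the first packet is sent.
 */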
static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
{
	if (!dsi->lanes_ready) {
		dsi->lanes_ready = true;
		mtk_dsi_rxtx_control(dsi);
		usleep_range(30, 100);
		mtk_dsi_reset_dphy(dsi);
		mtk_dsi_clk_ulp_mode_leave(dsi);
		mtk_dsi_lane0_ulp_mode_leave(dsi);
		mtk_dsi_clk_hs_mode(dsi, 0);
		usleep_range(1000, 3000);
		/* The reaction time after pulling up the mipi signal for dsi_rx */
	}
}

static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
{
	if (dsi->enabled)
		return;

	mtk_dsi_lane_ready(dsi);
	mtk_dsi_set_mode(dsi);
	mtk_dsi_clk_hs_mode(dsi, 1);

	mtk_dsi_start(dsi);

	dsi->enabled = true;
}

static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
{
	if (!dsi->enabled)
		return;

	dsi->enabled = false;
}

static int mtk_dsi_bridge_attach(struct drm_bridge *bridge,
				 enum drm_bridge_attach_flags flags)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	/* Attach the panel or bridge to the dsi bridge */
	return drm_bridge_attach(bridge->encoder, dsi->next_bridge,
				 &dsi->bridge, flags);
}

static void mtk_dsi_bridge_mode_set(struct drm_bridge *bridge,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *adjusted)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	drm_display_mode_to_videomode(adjusted, &dsi->vm);
}

static void mtk_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
					  struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	mtk_output_dsi_disable(dsi);
}

static void mtk_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	if (dsi->refcount == 0)
		return;

	mtk_output_dsi_enable(dsi);
}

static void mtk_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
					     struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);
	int ret;

	ret = mtk_dsi_poweron(dsi);
	if (ret < 0)
		DRM_ERROR("failed to power on dsi\n");
}

static void mtk_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
					       struct drm_bridge_state *old_bridge_state)
{
	struct mtk_dsi *dsi = bridge_to_dsi(bridge);

	mtk_dsi_poweroff(dsi);
}

static const struct drm_bridge_funcs mtk_dsi_bridge_funcs = {
	.attach = mtk_dsi_bridge_attach,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_disable = mtk_dsi_bridge_atomic_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_enable = mtk_dsi_bridge_atomic_enable,
	.atomic_pre_enable = mtk_dsi_bridge_atomic_pre_enable,
	.atomic_post_disable = mtk_dsi_bridge_atomic_post_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.mode_set = mtk_dsi_bridge_mode_set,
};
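/*
 * Entry points used by the mtk_drm display-path (DDP) code to power the
 * DSI block on and off together with the rest of the pipeline.
 */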
void mtk_dsi_ddp_start(struct device *dev)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	mtk_dsi_poweron(dsi);
}

void mtk_dsi_ddp_stop(struct device *dev)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	mtk_dsi_poweroff(dsi);
}

static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
{
	int ret;

	ret = drm_simple_encoder_init(drm, &dsi->encoder,
				      DRM_MODE_ENCODER_DSI);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		return ret;
	}

	dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);

	ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		goto err_cleanup_encoder;

	dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
	if (IS_ERR(dsi->connector)) {
		DRM_ERROR("Unable to create bridge connector\n");
		ret = PTR_ERR(dsi->connector);
		goto err_cleanup_encoder;
	}
	drm_connector_attach_encoder(dsi->connector, &dsi->encoder);

	return 0;

err_cleanup_encoder:
	drm_encoder_cleanup(&dsi->encoder);
	return ret;
}

static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
{
	int ret;
	struct drm_device *drm = data;
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	ret = mtk_dsi_encoder_init(drm, dsi);
	if (ret)
		return ret;

	return device_reset_optional(dev);
}

static void mtk_dsi_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct mtk_dsi *dsi = dev_get_drvdata(dev);

	drm_encoder_cleanup(&dsi->encoder);
}

static const struct component_ops mtk_dsi_component_ops = {
	.bind = mtk_dsi_bind,
	.unbind = mtk_dsi_unbind,
};

static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	struct device *dev = host->dev;
	int ret;

	dsi->lanes = device->lanes;
	dsi->format = device->format;
	dsi->mode_flags = device->mode_flags;
	dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
	if (IS_ERR(dsi->next_bridge))
		return PTR_ERR(dsi->next_bridge);

	drm_bridge_add(&dsi->bridge);

	ret = component_add(host->dev, &mtk_dsi_component_ops);
	if (ret) {
		DRM_ERROR("failed to add dsi_host component: %d\n", ret);
		drm_bridge_remove(&dsi->bridge);
		return ret;
	}

	return 0;
}

static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct mtk_dsi *dsi = host_to_dsi(host);

	component_del(host->dev, &mtk_dsi_component_ops);
	drm_bridge_remove(&dsi->bridge);
	return 0;
}

static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
	int ret;
	u32 val;

	ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
				 4, 2000000);
	if (ret) {
		DRM_WARN("polling dsi wait not busy timeout!\n");

		mtk_dsi_enable(dsi);
		mtk_dsi_reset_engine(dsi);
	}
}

static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
{
	switch (type) {
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
		return 1;
	case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
		return 2;
	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
		return read_data[1] + read_data[2] * 16;
	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
		DRM_INFO("type is 0x02, try again\n");
		break;
	default:
		DRM_INFO("type(0x%x) not recognized\n", type);
		break;
	}

	return 0;
}
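/*
 * Pack a MIPI DSI message into the command-queue registers at
 * driver_data->reg_cmdq_off (the offset differs per SoC). A short packet
 * (up to two payload bytes) fits in a single 4-byte CMDQ slot holding the
 * config byte, data type and payload; a long packet adds the 16-bit word
 * count and places the payload in the following slots.
 */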
static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
{
	const char *tx_buf = msg->tx_buf;
	u8 config, cmdq_size, cmdq_off, type = msg->type;
	u32 reg_val, cmdq_mask, i;
	u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off;

	if (MTK_DSI_HOST_IS_READ(type))
		config = BTA;
	else
		config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;

	if (msg->tx_len > 2) {
		cmdq_size = 1 + (msg->tx_len + 3) / 4;
		cmdq_off = 4;
		cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
		reg_val = (msg->tx_len << 16) | (type << 8) | config;
	} else {
		cmdq_size = 1;
		cmdq_off = 2;
		cmdq_mask = CONFIG | DATA_ID;
		reg_val = (type << 8) | config;
	}

	for (i = 0; i < msg->tx_len; i++)
		mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U),
			     (0xffUL << (((i + cmdq_off) & 3U) * 8U)),
			     tx_buf[i] << (((i + cmdq_off) & 3U) * 8U));

	mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val);
	mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
}

static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
				     const struct mipi_dsi_msg *msg, u8 flag)
{
	mtk_dsi_wait_for_idle(dsi);
	mtk_dsi_irq_data_clear(dsi, flag);
	mtk_dsi_cmdq(dsi, msg);
	mtk_dsi_start(dsi);

	if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
		return -ETIME;
	else
		return 0;
}
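/*
 * mipi_dsi_host transfer hook. If the controller is currently in a video
 * mode it is switched to command mode for the transfer and restored
 * afterwards. Read responses are fetched from the DSI_RX_DATA0..3 FIFO and
 * at most 10 payload bytes are copied back to the caller.
 */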
static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
				     const struct mipi_dsi_msg *msg)
{
	struct mtk_dsi *dsi = host_to_dsi(host);
	u32 recv_cnt, i;
	u8 read_data[16];
	void *src_addr;
	u8 irq_flag = CMD_DONE_INT_FLAG;
	u32 dsi_mode;
	int ret;

	dsi_mode = readl(dsi->regs + DSI_MODE_CTRL);
	if (dsi_mode & MODE) {
		mtk_dsi_stop(dsi);
		ret = mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500);
		if (ret)
			goto restore_dsi_mode;
	}

	if (MTK_DSI_HOST_IS_READ(msg->type))
		irq_flag |= LPRX_RD_RDY_INT_FLAG;

	mtk_dsi_lane_ready(dsi);

	ret = mtk_dsi_host_send_cmd(dsi, msg, irq_flag);
	if (ret)
		goto restore_dsi_mode;

	if (!MTK_DSI_HOST_IS_READ(msg->type)) {
		recv_cnt = 0;
		goto restore_dsi_mode;
	}

	if (!msg->rx_buf) {
		DRM_ERROR("dsi receive buffer may be NULL\n");
		ret = -EINVAL;
		goto restore_dsi_mode;
	}

	for (i = 0; i < 16; i++)
		*(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);

	recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);

	if (recv_cnt > 2)
		src_addr = &read_data[4];
	else
		src_addr = &read_data[1];

	if (recv_cnt > 10)
		recv_cnt = 10;

	if (recv_cnt > msg->rx_len)
		recv_cnt = msg->rx_len;

	if (recv_cnt)
		memcpy(msg->rx_buf, src_addr, recv_cnt);

	DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
		 recv_cnt, *((u8 *)(msg->tx_buf)));

restore_dsi_mode:
	if (dsi_mode & MODE) {
		mtk_dsi_set_mode(dsi);
		mtk_dsi_start(dsi);
	}

	return ret < 0 ? ret : recv_cnt;
}

static const struct mipi_dsi_host_ops mtk_dsi_ops = {
	.attach = mtk_dsi_host_attach,
	.detach = mtk_dsi_host_detach,
	.transfer = mtk_dsi_host_transfer,
};

static int mtk_dsi_probe(struct platform_device *pdev)
{
	struct mtk_dsi *dsi;
	struct device *dev = &pdev->dev;
	struct resource *regs;
	int irq_num;
	int ret;

	dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
	if (!dsi)
		return -ENOMEM;

	dsi->host.ops = &mtk_dsi_ops;
	dsi->host.dev = dev;
	ret = mipi_dsi_host_register(&dsi->host);
	if (ret < 0) {
		dev_err(dev, "failed to register DSI host: %d\n", ret);
		return ret;
	}

	dsi->driver_data = of_device_get_match_data(dev);

	dsi->engine_clk = devm_clk_get(dev, "engine");
	if (IS_ERR(dsi->engine_clk)) {
		ret = PTR_ERR(dsi->engine_clk);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get engine clock: %d\n", ret);
		goto err_unregister_host;
	}

	dsi->digital_clk = devm_clk_get(dev, "digital");
	if (IS_ERR(dsi->digital_clk)) {
		ret = PTR_ERR(dsi->digital_clk);

		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get digital clock: %d\n", ret);
		goto err_unregister_host;
	}

	dsi->hs_clk = devm_clk_get(dev, "hs");
	if (IS_ERR(dsi->hs_clk)) {
		ret = PTR_ERR(dsi->hs_clk);
		dev_err(dev, "Failed to get hs clock: %d\n", ret);
		goto err_unregister_host;
	}

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dsi->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(dsi->regs)) {
		ret = PTR_ERR(dsi->regs);
		dev_err(dev, "Failed to ioremap memory: %d\n", ret);
		goto err_unregister_host;
	}

	dsi->phy = devm_phy_get(dev, "dphy");
	if (IS_ERR(dsi->phy)) {
		ret = PTR_ERR(dsi->phy);
		dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
		goto err_unregister_host;
	}

	irq_num = platform_get_irq(pdev, 0);
	if (irq_num < 0) {
		ret = irq_num;
		goto err_unregister_host;
	}

	ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), dsi);
	if (ret) {
		dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
		goto err_unregister_host;
	}

	init_waitqueue_head(&dsi->irq_wait_queue);

	platform_set_drvdata(pdev, dsi);

	dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
	dsi->bridge.of_node = dev->of_node;
	dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;

	return 0;

err_unregister_host:
	mipi_dsi_host_unregister(&dsi->host);
	return ret;
}

static void mtk_dsi_remove(struct platform_device *pdev)
{
	struct mtk_dsi *dsi = platform_get_drvdata(pdev);

	mtk_output_dsi_disable(dsi);
	mipi_dsi_host_unregister(&dsi->host);
}
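/*
 * Per-SoC configuration: the command-queue register base differs between
 * generations, and newer SoCs additionally expose shadow-register control
 * (DSI_SHADOW_DEBUG) and the frame-size register (DSI_SIZE_CON).
 */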
static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = {
	.reg_cmdq_off = 0x200,
};

static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = {
	.reg_cmdq_off = 0x180,
};

static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = {
	.reg_cmdq_off = 0x200,
	.has_shadow_ctl = true,
	.has_size_ctl = true,
};

static const struct mtk_dsi_driver_data mt8186_dsi_driver_data = {
	.reg_cmdq_off = 0xd00,
	.has_shadow_ctl = true,
	.has_size_ctl = true,
};

static const struct of_device_id mtk_dsi_of_match[] = {
	{ .compatible = "mediatek,mt2701-dsi",
	  .data = &mt2701_dsi_driver_data },
	{ .compatible = "mediatek,mt8173-dsi",
	  .data = &mt8173_dsi_driver_data },
	{ .compatible = "mediatek,mt8183-dsi",
	  .data = &mt8183_dsi_driver_data },
	{ .compatible = "mediatek,mt8186-dsi",
	  .data = &mt8186_dsi_driver_data },
	{ },
};
MODULE_DEVICE_TABLE(of, mtk_dsi_of_match);

struct platform_driver mtk_dsi_driver = {
	.probe = mtk_dsi_probe,
	.remove_new = mtk_dsi_remove,
	.driver = {
		.name = "mtk-dsi",
		.of_match_table = mtk_dsi_of_match,
	},
};