1 /* 2 * Copyright (C) 2012 Texas Instruments 3 * Author: Rob Clark <robdclark@gmail.com> 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published by 7 * the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program. If not, see <http://www.gnu.org/licenses/>. 16 */ 17 18 #include <drm/drm_atomic.h> 19 #include <drm/drm_atomic_helper.h> 20 #include <drm/drm_crtc.h> 21 #include <drm/drm_flip_work.h> 22 #include <drm/drm_plane_helper.h> 23 #include <linux/workqueue.h> 24 #include <linux/completion.h> 25 #include <linux/dma-mapping.h> 26 27 #include "tilcdc_drv.h" 28 #include "tilcdc_regs.h" 29 30 #define TILCDC_VBLANK_SAFETY_THRESHOLD_US 1000 31 #define TILCDC_PALETTE_SIZE 32 32 #define TILCDC_PALETTE_FIRST_ENTRY 0x4000 33 34 struct tilcdc_crtc { 35 struct drm_crtc base; 36 37 struct drm_plane primary; 38 const struct tilcdc_panel_info *info; 39 struct drm_pending_vblank_event *event; 40 struct mutex enable_lock; 41 bool enabled; 42 bool shutdown; 43 wait_queue_head_t frame_done_wq; 44 bool frame_done; 45 spinlock_t irq_lock; 46 47 unsigned int lcd_fck_rate; 48 49 ktime_t last_vblank; 50 51 struct drm_framebuffer *curr_fb; 52 struct drm_framebuffer *next_fb; 53 54 /* for deferred fb unref's: */ 55 struct drm_flip_work unref_work; 56 57 /* Only set if an external encoder is connected */ 58 bool simulate_vesa_sync; 59 60 int sync_lost_count; 61 bool frame_intact; 62 struct work_struct recover_work; 63 64 dma_addr_t palette_dma_handle; 65 u16 *palette_base; 66 struct completion palette_loaded; 67 }; 68 #define to_tilcdc_crtc(x) container_of(x, 
struct tilcdc_crtc, base) 69 70 static void unref_worker(struct drm_flip_work *work, void *val) 71 { 72 struct tilcdc_crtc *tilcdc_crtc = 73 container_of(work, struct tilcdc_crtc, unref_work); 74 struct drm_device *dev = tilcdc_crtc->base.dev; 75 76 mutex_lock(&dev->mode_config.mutex); 77 drm_framebuffer_unreference(val); 78 mutex_unlock(&dev->mode_config.mutex); 79 } 80 81 static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) 82 { 83 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 84 struct drm_device *dev = crtc->dev; 85 struct tilcdc_drm_private *priv = dev->dev_private; 86 struct drm_gem_cma_object *gem; 87 dma_addr_t start, end; 88 u64 dma_base_and_ceiling; 89 90 gem = drm_fb_cma_get_gem_obj(fb, 0); 91 92 start = gem->paddr + fb->offsets[0] + 93 crtc->y * fb->pitches[0] + 94 crtc->x * fb->format->cpp[0]; 95 96 end = start + (crtc->mode.vdisplay * fb->pitches[0]); 97 98 /* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG 99 * with a single insruction, if available. This should make it more 100 * unlikely that LCDC would fetch the DMA addresses in the middle of 101 * an update. 102 */ 103 if (priv->rev == 1) 104 end -= 1; 105 106 dma_base_and_ceiling = (u64)end << 32 | start; 107 tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling); 108 109 if (tilcdc_crtc->curr_fb) 110 drm_flip_work_queue(&tilcdc_crtc->unref_work, 111 tilcdc_crtc->curr_fb); 112 113 tilcdc_crtc->curr_fb = fb; 114 } 115 116 /* 117 * The driver currently only supports only true color formats. For 118 * true color the palette block is bypassed, but a 32 byte palette 119 * should still be loaded. The first 16-bit entry must be 0x4000 while 120 * all other entries must be zeroed. 
/*
 * The driver currently supports only true color formats. For true color
 * the palette block is bypassed, but a 32 byte palette should still be
 * loaded. The first 16-bit entry must be 0x4000 while all other entries
 * must be zeroed.
 *
 * The palette is loaded by pointing the DMA engine at the pre-allocated
 * palette buffer, enabling raster DMA in palette-load-only mode, and
 * waiting (with a 50 ms timeout) for the palette-loaded interrupt to
 * complete tilcdc_crtc->palette_loaded.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	reinit_completion(&tilcdc_crtc->palette_loaded);

	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	/*
	 * NOTE(review): the (u32) cast binds to palette_dma_handle only;
	 * the + TILCDC_PALETTE_SIZE - 1 is added after truncation. That is
	 * equivalent here since the buffer is small, but worth confirming.
	 */
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);

	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* Enable DMA Palette Loaded Interrupt */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_err(dev->dev, "%s: Palette loading timeout", __func__);

	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}

/*
 * Enable the interrupts used during normal scanout: sync-lost, frame-done,
 * FIFO underflow and end-of-frame. Register layout differs between LCDC
 * revision 1 and revision 2.
 */
static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA);
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}

/*
 * Counterpart of tilcdc_crtc_enable_irqs(); also clears the palette-load
 * interrupt in case it was left enabled.
 */
static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}

/* Pulse the LCDC main reset. Only revision 2 has this reset register. */
static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	if (priv->rev != 2)
		return;

	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	usleep_range(250, 1000);
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}
divider value. 221 */ 222 static unsigned int tilcdc_pclk_diff(unsigned long rate, 223 unsigned long real_rate) 224 { 225 int r = rate / 100, rr = real_rate / 100; 226 227 return (unsigned int)(abs(((rr - r) * 100) / r)); 228 } 229 230 static void tilcdc_crtc_set_clk(struct drm_crtc *crtc) 231 { 232 struct drm_device *dev = crtc->dev; 233 struct tilcdc_drm_private *priv = dev->dev_private; 234 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 235 unsigned long clk_rate, real_rate, req_rate; 236 unsigned int clkdiv; 237 int ret; 238 239 clkdiv = 2; /* first try using a standard divider of 2 */ 240 241 /* mode.clock is in KHz, set_rate wants parameter in Hz */ 242 req_rate = crtc->mode.clock * 1000; 243 244 ret = clk_set_rate(priv->clk, req_rate * clkdiv); 245 clk_rate = clk_get_rate(priv->clk); 246 if (ret < 0) { 247 /* 248 * If we fail to set the clock rate (some architectures don't 249 * use the common clock framework yet and may not implement 250 * all the clk API calls for every clock), try the next best 251 * thing: adjusting the clock divider, unless clk_get_rate() 252 * failed as well. 253 */ 254 if (!clk_rate) { 255 /* Nothing more we can do. Just bail out. */ 256 dev_err(dev->dev, 257 "failed to set the pixel clock - unable to read current lcdc clock rate\n"); 258 return; 259 } 260 261 clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate); 262 263 /* 264 * Emit a warning if the real clock rate resulting from the 265 * calculated divider differs much from the requested rate. 266 * 267 * 5% is an arbitrary value - LCDs are usually quite tolerant 268 * about pixel clock rates. 
269 */ 270 real_rate = clkdiv * req_rate; 271 272 if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) { 273 dev_warn(dev->dev, 274 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n", 275 clk_rate, real_rate); 276 } 277 } 278 279 tilcdc_crtc->lcd_fck_rate = clk_rate; 280 281 DBG("lcd_clk=%u, mode clock=%d, div=%u", 282 tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv); 283 284 /* Configure the LCD clock divisor. */ 285 tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) | 286 LCDC_RASTER_MODE); 287 288 if (priv->rev == 2) 289 tilcdc_set(dev, LCDC_CLK_ENABLE_REG, 290 LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN | 291 LCDC_V2_CORE_CLK_EN); 292 } 293 294 static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) 295 { 296 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 297 struct drm_device *dev = crtc->dev; 298 struct tilcdc_drm_private *priv = dev->dev_private; 299 const struct tilcdc_panel_info *info = tilcdc_crtc->info; 300 uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw; 301 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 302 struct drm_framebuffer *fb = crtc->primary->state->fb; 303 304 if (WARN_ON(!info)) 305 return; 306 307 if (WARN_ON(!fb)) 308 return; 309 310 /* Configure the Burst Size and fifo threshold of DMA: */ 311 reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770; 312 switch (info->dma_burst_sz) { 313 case 1: 314 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1); 315 break; 316 case 2: 317 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2); 318 break; 319 case 4: 320 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4); 321 break; 322 case 8: 323 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8); 324 break; 325 case 16: 326 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16); 327 break; 328 default: 329 dev_err(dev->dev, "invalid burst size\n"); 330 return; 331 } 332 reg |= (info->fifo_th << 8); 333 tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg); 334 335 /* Configure timings: */ 336 hbp = mode->htotal - mode->hsync_end; 337 hfp = 
mode->hsync_start - mode->hdisplay; 338 hsw = mode->hsync_end - mode->hsync_start; 339 vbp = mode->vtotal - mode->vsync_end; 340 vfp = mode->vsync_start - mode->vdisplay; 341 vsw = mode->vsync_end - mode->vsync_start; 342 343 DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u", 344 mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw); 345 346 /* Set AC Bias Period and Number of Transitions per Interrupt: */ 347 reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00; 348 reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) | 349 LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt); 350 351 /* 352 * subtract one from hfp, hbp, hsw because the hardware uses 353 * a value of 0 as 1 354 */ 355 if (priv->rev == 2) { 356 /* clear bits we're going to set */ 357 reg &= ~0x78000033; 358 reg |= ((hfp-1) & 0x300) >> 8; 359 reg |= ((hbp-1) & 0x300) >> 4; 360 reg |= ((hsw-1) & 0x3c0) << 21; 361 } 362 tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg); 363 364 reg = (((mode->hdisplay >> 4) - 1) << 4) | 365 (((hbp-1) & 0xff) << 24) | 366 (((hfp-1) & 0xff) << 16) | 367 (((hsw-1) & 0x3f) << 10); 368 if (priv->rev == 2) 369 reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3; 370 tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg); 371 372 reg = ((mode->vdisplay - 1) & 0x3ff) | 373 ((vbp & 0xff) << 24) | 374 ((vfp & 0xff) << 16) | 375 (((vsw-1) & 0x3f) << 10); 376 tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg); 377 378 /* 379 * be sure to set Bit 10 for the V2 LCDC controller, 380 * otherwise limited to 1024 pixels width, stopping 381 * 1920x1080 being supported. 
382 */ 383 if (priv->rev == 2) { 384 if ((mode->vdisplay - 1) & 0x400) { 385 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, 386 LCDC_LPP_B10); 387 } else { 388 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, 389 LCDC_LPP_B10); 390 } 391 } 392 393 /* Configure display type: */ 394 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) & 395 ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE | 396 LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 397 0x000ff000 /* Palette Loading Delay bits */); 398 reg |= LCDC_TFT_MODE; /* no monochrome/passive support */ 399 if (info->tft_alt_mode) 400 reg |= LCDC_TFT_ALT_ENABLE; 401 if (priv->rev == 2) { 402 switch (fb->format->format) { 403 case DRM_FORMAT_BGR565: 404 case DRM_FORMAT_RGB565: 405 break; 406 case DRM_FORMAT_XBGR8888: 407 case DRM_FORMAT_XRGB8888: 408 reg |= LCDC_V2_TFT_24BPP_UNPACK; 409 /* fallthrough */ 410 case DRM_FORMAT_BGR888: 411 case DRM_FORMAT_RGB888: 412 reg |= LCDC_V2_TFT_24BPP_MODE; 413 break; 414 default: 415 dev_err(dev->dev, "invalid pixel format\n"); 416 return; 417 } 418 } 419 reg |= info->fdd < 12; 420 tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg); 421 422 if (info->invert_pxl_clk) 423 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK); 424 else 425 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK); 426 427 if (info->sync_ctrl) 428 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL); 429 else 430 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL); 431 432 if (info->sync_edge) 433 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); 434 else 435 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE); 436 437 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 438 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 439 else 440 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC); 441 442 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 443 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC); 444 else 445 tilcdc_clear(dev, 
LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC); 446 447 if (info->raster_order) 448 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER); 449 else 450 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER); 451 452 tilcdc_crtc_set_clk(crtc); 453 454 tilcdc_crtc_load_palette(crtc); 455 456 set_scanout(crtc, fb); 457 458 drm_framebuffer_reference(fb); 459 460 crtc->hwmode = crtc->state->adjusted_mode; 461 } 462 463 static void tilcdc_crtc_enable(struct drm_crtc *crtc) 464 { 465 struct drm_device *dev = crtc->dev; 466 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 467 unsigned long flags; 468 469 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 470 mutex_lock(&tilcdc_crtc->enable_lock); 471 if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) { 472 mutex_unlock(&tilcdc_crtc->enable_lock); 473 return; 474 } 475 476 pm_runtime_get_sync(dev->dev); 477 478 reset(crtc); 479 480 tilcdc_crtc_set_mode(crtc); 481 482 tilcdc_crtc_enable_irqs(dev); 483 484 tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE); 485 tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, 486 LCDC_PALETTE_LOAD_MODE(DATA_ONLY), 487 LCDC_PALETTE_LOAD_MODE_MASK); 488 489 /* There is no real chance for a race here as the time stamp 490 * is taken before the raster DMA is started. The spin-lock is 491 * taken to have a memory barrier after taking the time-stamp 492 * and to avoid a context switch between taking the stamp and 493 * enabling the raster. 
494 */ 495 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 496 tilcdc_crtc->last_vblank = ktime_get(); 497 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); 498 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 499 500 drm_crtc_vblank_on(crtc); 501 502 tilcdc_crtc->enabled = true; 503 mutex_unlock(&tilcdc_crtc->enable_lock); 504 } 505 506 static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) 507 { 508 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 509 struct drm_device *dev = crtc->dev; 510 struct tilcdc_drm_private *priv = dev->dev_private; 511 int ret; 512 513 mutex_lock(&tilcdc_crtc->enable_lock); 514 if (shutdown) 515 tilcdc_crtc->shutdown = true; 516 if (!tilcdc_crtc->enabled) { 517 mutex_unlock(&tilcdc_crtc->enable_lock); 518 return; 519 } 520 tilcdc_crtc->frame_done = false; 521 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); 522 523 /* 524 * Wait for framedone irq which will still come before putting 525 * things to sleep.. 526 */ 527 ret = wait_event_timeout(tilcdc_crtc->frame_done_wq, 528 tilcdc_crtc->frame_done, 529 msecs_to_jiffies(500)); 530 if (ret == 0) 531 dev_err(dev->dev, "%s: timeout waiting for framedone\n", 532 __func__); 533 534 drm_crtc_vblank_off(crtc); 535 536 tilcdc_crtc_disable_irqs(dev); 537 538 pm_runtime_put_sync(dev->dev); 539 540 if (tilcdc_crtc->next_fb) { 541 drm_flip_work_queue(&tilcdc_crtc->unref_work, 542 tilcdc_crtc->next_fb); 543 tilcdc_crtc->next_fb = NULL; 544 } 545 546 if (tilcdc_crtc->curr_fb) { 547 drm_flip_work_queue(&tilcdc_crtc->unref_work, 548 tilcdc_crtc->curr_fb); 549 tilcdc_crtc->curr_fb = NULL; 550 } 551 552 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 553 554 tilcdc_crtc->enabled = false; 555 mutex_unlock(&tilcdc_crtc->enable_lock); 556 } 557 558 static void tilcdc_crtc_disable(struct drm_crtc *crtc) 559 { 560 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 561 tilcdc_crtc_off(crtc, false); 562 } 563 564 void tilcdc_crtc_shutdown(struct drm_crtc *crtc) 
565 { 566 tilcdc_crtc_off(crtc, true); 567 } 568 569 static bool tilcdc_crtc_is_on(struct drm_crtc *crtc) 570 { 571 return crtc->state && crtc->state->enable && crtc->state->active; 572 } 573 574 static void tilcdc_crtc_recover_work(struct work_struct *work) 575 { 576 struct tilcdc_crtc *tilcdc_crtc = 577 container_of(work, struct tilcdc_crtc, recover_work); 578 struct drm_crtc *crtc = &tilcdc_crtc->base; 579 580 dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__); 581 582 drm_modeset_lock_crtc(crtc, NULL); 583 584 if (!tilcdc_crtc_is_on(crtc)) 585 goto out; 586 587 tilcdc_crtc_disable(crtc); 588 tilcdc_crtc_enable(crtc); 589 out: 590 drm_modeset_unlock_crtc(crtc); 591 } 592 593 static void tilcdc_crtc_destroy(struct drm_crtc *crtc) 594 { 595 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 596 struct tilcdc_drm_private *priv = crtc->dev->dev_private; 597 598 drm_modeset_lock_crtc(crtc, NULL); 599 tilcdc_crtc_disable(crtc); 600 drm_modeset_unlock_crtc(crtc); 601 602 flush_workqueue(priv->wq); 603 604 of_node_put(crtc->port); 605 drm_crtc_cleanup(crtc); 606 drm_flip_work_cleanup(&tilcdc_crtc->unref_work); 607 } 608 609 int tilcdc_crtc_update_fb(struct drm_crtc *crtc, 610 struct drm_framebuffer *fb, 611 struct drm_pending_vblank_event *event) 612 { 613 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 614 struct drm_device *dev = crtc->dev; 615 616 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 617 618 if (tilcdc_crtc->event) { 619 dev_err(dev->dev, "already pending page flip!\n"); 620 return -EBUSY; 621 } 622 623 drm_framebuffer_reference(fb); 624 625 crtc->primary->fb = fb; 626 tilcdc_crtc->event = event; 627 628 mutex_lock(&tilcdc_crtc->enable_lock); 629 630 if (tilcdc_crtc->enabled) { 631 unsigned long flags; 632 ktime_t next_vblank; 633 s64 tdiff; 634 635 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 636 637 next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, 638 1000000 / crtc->hwmode.vrefresh); 639 tdiff = ktime_to_us(ktime_sub(next_vblank, 
ktime_get())); 640 641 if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) 642 tilcdc_crtc->next_fb = fb; 643 else 644 set_scanout(crtc, fb); 645 646 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 647 } 648 649 mutex_unlock(&tilcdc_crtc->enable_lock); 650 651 return 0; 652 } 653 654 static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc, 655 const struct drm_display_mode *mode, 656 struct drm_display_mode *adjusted_mode) 657 { 658 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 659 660 if (!tilcdc_crtc->simulate_vesa_sync) 661 return true; 662 663 /* 664 * tilcdc does not generate VESA-compliant sync but aligns 665 * VS on the second edge of HS instead of first edge. 666 * We use adjusted_mode, to fixup sync by aligning both rising 667 * edges and add HSKEW offset to fix the sync. 668 */ 669 adjusted_mode->hskew = mode->hsync_end - mode->hsync_start; 670 adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW; 671 672 if (mode->flags & DRM_MODE_FLAG_NHSYNC) { 673 adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC; 674 adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC; 675 } else { 676 adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC; 677 adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC; 678 } 679 680 return true; 681 } 682 683 static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc, 684 struct drm_crtc_state *state) 685 { 686 struct drm_display_mode *mode = &state->mode; 687 int ret; 688 689 /* If we are not active we don't care */ 690 if (!state->active) 691 return 0; 692 693 if (state->state->planes[0].ptr != crtc->primary || 694 state->state->planes[0].state == NULL || 695 state->state->planes[0].state->crtc != crtc) { 696 dev_dbg(crtc->dev->dev, "CRTC primary plane must be present"); 697 return -EINVAL; 698 } 699 700 ret = tilcdc_crtc_mode_valid(crtc, mode); 701 if (ret) { 702 dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name); 703 return -EINVAL; 704 } 705 706 return 0; 707 } 708 709 static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc) 710 { 711 return 0; 
712 } 713 714 static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc) 715 { 716 } 717 718 static const struct drm_crtc_funcs tilcdc_crtc_funcs = { 719 .destroy = tilcdc_crtc_destroy, 720 .set_config = drm_atomic_helper_set_config, 721 .page_flip = drm_atomic_helper_page_flip, 722 .reset = drm_atomic_helper_crtc_reset, 723 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 724 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 725 .enable_vblank = tilcdc_crtc_enable_vblank, 726 .disable_vblank = tilcdc_crtc_disable_vblank, 727 }; 728 729 static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = { 730 .mode_fixup = tilcdc_crtc_mode_fixup, 731 .enable = tilcdc_crtc_enable, 732 .disable = tilcdc_crtc_disable, 733 .atomic_check = tilcdc_crtc_atomic_check, 734 }; 735 736 int tilcdc_crtc_max_width(struct drm_crtc *crtc) 737 { 738 struct drm_device *dev = crtc->dev; 739 struct tilcdc_drm_private *priv = dev->dev_private; 740 int max_width = 0; 741 742 if (priv->rev == 1) 743 max_width = 1024; 744 else if (priv->rev == 2) 745 max_width = 2048; 746 747 return max_width; 748 } 749 750 int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode) 751 { 752 struct tilcdc_drm_private *priv = crtc->dev->dev_private; 753 unsigned int bandwidth; 754 uint32_t hbp, hfp, hsw, vbp, vfp, vsw; 755 756 /* 757 * check to see if the width is within the range that 758 * the LCD Controller physically supports 759 */ 760 if (mode->hdisplay > tilcdc_crtc_max_width(crtc)) 761 return MODE_VIRTUAL_X; 762 763 /* width must be multiple of 16 */ 764 if (mode->hdisplay & 0xf) 765 return MODE_VIRTUAL_X; 766 767 if (mode->vdisplay > 2048) 768 return MODE_VIRTUAL_Y; 769 770 DBG("Processing mode %dx%d@%d with pixel clock %d", 771 mode->hdisplay, mode->vdisplay, 772 drm_mode_vrefresh(mode), mode->clock); 773 774 hbp = mode->htotal - mode->hsync_end; 775 hfp = mode->hsync_start - mode->hdisplay; 776 hsw = mode->hsync_end - mode->hsync_start; 777 
vbp = mode->vtotal - mode->vsync_end; 778 vfp = mode->vsync_start - mode->vdisplay; 779 vsw = mode->vsync_end - mode->vsync_start; 780 781 if ((hbp-1) & ~0x3ff) { 782 DBG("Pruning mode: Horizontal Back Porch out of range"); 783 return MODE_HBLANK_WIDE; 784 } 785 786 if ((hfp-1) & ~0x3ff) { 787 DBG("Pruning mode: Horizontal Front Porch out of range"); 788 return MODE_HBLANK_WIDE; 789 } 790 791 if ((hsw-1) & ~0x3ff) { 792 DBG("Pruning mode: Horizontal Sync Width out of range"); 793 return MODE_HSYNC_WIDE; 794 } 795 796 if (vbp & ~0xff) { 797 DBG("Pruning mode: Vertical Back Porch out of range"); 798 return MODE_VBLANK_WIDE; 799 } 800 801 if (vfp & ~0xff) { 802 DBG("Pruning mode: Vertical Front Porch out of range"); 803 return MODE_VBLANK_WIDE; 804 } 805 806 if ((vsw-1) & ~0x3f) { 807 DBG("Pruning mode: Vertical Sync Width out of range"); 808 return MODE_VSYNC_WIDE; 809 } 810 811 /* 812 * some devices have a maximum allowed pixel clock 813 * configured from the DT 814 */ 815 if (mode->clock > priv->max_pixelclock) { 816 DBG("Pruning mode: pixel clock too high"); 817 return MODE_CLOCK_HIGH; 818 } 819 820 /* 821 * some devices further limit the max horizontal resolution 822 * configured from the DT 823 */ 824 if (mode->hdisplay > priv->max_width) 825 return MODE_BAD_WIDTH; 826 827 /* filter out modes that would require too much memory bandwidth: */ 828 bandwidth = mode->hdisplay * mode->vdisplay * 829 drm_mode_vrefresh(mode); 830 if (bandwidth > priv->max_bandwidth) { 831 DBG("Pruning mode: exceeds defined bandwidth limit"); 832 return MODE_BAD; 833 } 834 835 return MODE_OK; 836 } 837 838 void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc, 839 const struct tilcdc_panel_info *info) 840 { 841 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 842 tilcdc_crtc->info = info; 843 } 844 845 void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc, 846 bool simulate_vesa_sync) 847 { 848 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 849 850 
tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync; 851 } 852 853 void tilcdc_crtc_update_clk(struct drm_crtc *crtc) 854 { 855 struct drm_device *dev = crtc->dev; 856 struct tilcdc_drm_private *priv = dev->dev_private; 857 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 858 859 drm_modeset_lock_crtc(crtc, NULL); 860 if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) { 861 if (tilcdc_crtc_is_on(crtc)) { 862 pm_runtime_get_sync(dev->dev); 863 tilcdc_crtc_disable(crtc); 864 865 tilcdc_crtc_set_clk(crtc); 866 867 tilcdc_crtc_enable(crtc); 868 pm_runtime_put_sync(dev->dev); 869 } 870 } 871 drm_modeset_unlock_crtc(crtc); 872 } 873 874 #define SYNC_LOST_COUNT_LIMIT 50 875 876 irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) 877 { 878 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); 879 struct drm_device *dev = crtc->dev; 880 struct tilcdc_drm_private *priv = dev->dev_private; 881 uint32_t stat, reg; 882 883 stat = tilcdc_read_irqstatus(dev); 884 tilcdc_clear_irqstatus(dev, stat); 885 886 if (stat & LCDC_END_OF_FRAME0) { 887 unsigned long flags; 888 bool skip_event = false; 889 ktime_t now; 890 891 now = ktime_get(); 892 893 drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); 894 895 spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); 896 897 tilcdc_crtc->last_vblank = now; 898 899 if (tilcdc_crtc->next_fb) { 900 set_scanout(crtc, tilcdc_crtc->next_fb); 901 tilcdc_crtc->next_fb = NULL; 902 skip_event = true; 903 } 904 905 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 906 907 drm_crtc_handle_vblank(crtc); 908 909 if (!skip_event) { 910 struct drm_pending_vblank_event *event; 911 912 spin_lock_irqsave(&dev->event_lock, flags); 913 914 event = tilcdc_crtc->event; 915 tilcdc_crtc->event = NULL; 916 if (event) 917 drm_crtc_send_vblank_event(crtc, event); 918 919 spin_unlock_irqrestore(&dev->event_lock, flags); 920 } 921 922 if (tilcdc_crtc->frame_intact) 923 tilcdc_crtc->sync_lost_count = 0; 924 else 925 tilcdc_crtc->frame_intact = true; 
926 } 927 928 if (stat & LCDC_FIFO_UNDERFLOW) 929 dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow", 930 __func__, stat); 931 932 if (stat & LCDC_PL_LOAD_DONE) { 933 complete(&tilcdc_crtc->palette_loaded); 934 if (priv->rev == 1) 935 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, 936 LCDC_V1_PL_INT_ENA); 937 else 938 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, 939 LCDC_V2_PL_INT_ENA); 940 } 941 942 if (stat & LCDC_SYNC_LOST) { 943 dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost", 944 __func__, stat); 945 tilcdc_crtc->frame_intact = false; 946 if (priv->rev == 1) { 947 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG); 948 if (reg & LCDC_RASTER_ENABLE) { 949 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, 950 LCDC_RASTER_ENABLE); 951 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, 952 LCDC_RASTER_ENABLE); 953 } 954 } else { 955 if (tilcdc_crtc->sync_lost_count++ > 956 SYNC_LOST_COUNT_LIMIT) { 957 dev_err(dev->dev, 958 "%s(0x%08x): Sync lost flood detected, recovering", 959 __func__, stat); 960 queue_work(system_wq, 961 &tilcdc_crtc->recover_work); 962 tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, 963 LCDC_SYNC_LOST); 964 tilcdc_crtc->sync_lost_count = 0; 965 } 966 } 967 } 968 969 if (stat & LCDC_FRAME_DONE) { 970 tilcdc_crtc->frame_done = true; 971 wake_up(&tilcdc_crtc->frame_done_wq); 972 /* rev 1 lcdc appears to hang if irq is not disbaled here */ 973 if (priv->rev == 1) 974 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, 975 LCDC_V1_FRAME_DONE_INT_ENA); 976 } 977 978 /* For revision 2 only */ 979 if (priv->rev == 2) { 980 /* Indicate to LCDC that the interrupt service routine has 981 * completed, see 13.3.6.1.6 in AM335x TRM. 
982 */ 983 tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0); 984 } 985 986 return IRQ_HANDLED; 987 } 988 989 int tilcdc_crtc_create(struct drm_device *dev) 990 { 991 struct tilcdc_drm_private *priv = dev->dev_private; 992 struct tilcdc_crtc *tilcdc_crtc; 993 struct drm_crtc *crtc; 994 int ret; 995 996 tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL); 997 if (!tilcdc_crtc) { 998 dev_err(dev->dev, "allocation failed\n"); 999 return -ENOMEM; 1000 } 1001 1002 init_completion(&tilcdc_crtc->palette_loaded); 1003 tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev, 1004 TILCDC_PALETTE_SIZE, 1005 &tilcdc_crtc->palette_dma_handle, 1006 GFP_KERNEL | __GFP_ZERO); 1007 if (!tilcdc_crtc->palette_base) 1008 return -ENOMEM; 1009 *tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY; 1010 1011 crtc = &tilcdc_crtc->base; 1012 1013 ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary); 1014 if (ret < 0) 1015 goto fail; 1016 1017 mutex_init(&tilcdc_crtc->enable_lock); 1018 1019 init_waitqueue_head(&tilcdc_crtc->frame_done_wq); 1020 1021 drm_flip_work_init(&tilcdc_crtc->unref_work, 1022 "unref", unref_worker); 1023 1024 spin_lock_init(&tilcdc_crtc->irq_lock); 1025 INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work); 1026 1027 ret = drm_crtc_init_with_planes(dev, crtc, 1028 &tilcdc_crtc->primary, 1029 NULL, 1030 &tilcdc_crtc_funcs, 1031 "tilcdc crtc"); 1032 if (ret < 0) 1033 goto fail; 1034 1035 drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs); 1036 1037 if (priv->is_componentized) { 1038 struct device_node *ports = 1039 of_get_child_by_name(dev->dev->of_node, "ports"); 1040 1041 if (ports) { 1042 crtc->port = of_get_child_by_name(ports, "port"); 1043 of_node_put(ports); 1044 } else { 1045 crtc->port = 1046 of_get_child_by_name(dev->dev->of_node, "port"); 1047 } 1048 if (!crtc->port) { /* This should never happen */ 1049 dev_err(dev->dev, "Port node not found in %s\n", 1050 dev->dev->of_node->full_name); 1051 ret = -EINVAL; 1052 goto fail; 1053 } 
1054 } 1055 1056 priv->crtc = crtc; 1057 return 0; 1058 1059 fail: 1060 tilcdc_crtc_destroy(crtc); 1061 return ret; 1062 } 1063