/*
 * Copyright (C) 2012 Texas Instruments
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_plane_helper.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>

#include "tilcdc_drv.h"
#include "tilcdc_regs.h"

#define TILCDC_VBLANK_SAFETY_THRESHOLD_US	1000
#define TILCDC_PALETTE_SIZE			32
#define TILCDC_PALETTE_FIRST_ENTRY		0x4000

struct tilcdc_crtc {
	struct drm_crtc base;

	struct drm_plane primary;
	const struct tilcdc_panel_info *info;
	struct drm_pending_vblank_event *event;
	struct mutex enable_lock;
	bool enabled;
	bool shutdown;
	wait_queue_head_t frame_done_wq;
	bool frame_done;
	spinlock_t irq_lock;

	unsigned int lcd_fck_rate;

	ktime_t last_vblank;

	struct drm_framebuffer *curr_fb;
	struct drm_framebuffer *next_fb;

	/* for deferred fb unref's: */
	struct drm_flip_work unref_work;

	/* Only set if an external encoder is connected */
	bool simulate_vesa_sync;

	int sync_lost_count;
	bool frame_intact;
	struct work_struct recover_work;

	dma_addr_t palette_dma_handle;
	u16 *palette_base;
	struct completion palette_loaded;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)

static void unref_worker(struct drm_flip_work *work, void *val)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, unref_work);
	struct drm_device *dev = tilcdc_crtc->base.dev;

	mutex_lock(&dev->mode_config.mutex);
	drm_framebuffer_unreference(val);
	mutex_unlock(&dev->mode_config.mutex);
}

static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_gem_cma_object *gem;
	dma_addr_t start, end;
	u64 dma_base_and_ceiling;

	gem = drm_fb_cma_get_gem_obj(fb, 0);

	start = gem->paddr + fb->offsets[0] +
		crtc->y * fb->pitches[0] +
		crtc->x * fb->format->cpp[0];

	end = start + (crtc->mode.vdisplay * fb->pitches[0]);

	/* Write LCDC_DMA_FB_BASE_ADDR_0_REG and LCDC_DMA_FB_CEILING_ADDR_0_REG
	 * with a single instruction, if available. This should make it more
	 * unlikely that LCDC would fetch the DMA addresses in the middle of
	 * an update.
	 */
	if (priv->rev == 1)
		end -= 1;

	dma_base_and_ceiling = (u64)end << 32 | start;
	tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);

	if (tilcdc_crtc->curr_fb)
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
			tilcdc_crtc->curr_fb);

	tilcdc_crtc->curr_fb = fb;
}

/*
 * The driver currently supports only true color formats. For
 * true color the palette block is bypassed, but a 32 byte palette
 * should still be loaded. The first 16-bit entry must be 0x4000 while
 * all other entries must be zeroed.
 */
static void tilcdc_crtc_load_palette(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	reinit_completion(&tilcdc_crtc->palette_loaded);

	/* Tell the LCDC where the palette is located. */
	tilcdc_write(dev, LCDC_DMA_FB_BASE_ADDR_0_REG,
		     tilcdc_crtc->palette_dma_handle);
	tilcdc_write(dev, LCDC_DMA_FB_CEILING_ADDR_0_REG,
		     (u32) tilcdc_crtc->palette_dma_handle +
		     TILCDC_PALETTE_SIZE - 1);

	/* Set dma load mode for palette loading only. */
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(PALETTE_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* Enable DMA Palette Loaded Interrupt */
	if (priv->rev == 1)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_PL_INT_ENA);

	/* Enable LCDC DMA and wait for palette to be loaded. */
	tilcdc_clear_irqstatus(dev, 0xffffffff);
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	ret = wait_for_completion_timeout(&tilcdc_crtc->palette_loaded,
					  msecs_to_jiffies(50));
	if (ret == 0)
		dev_err(dev->dev, "%s: Palette loading timeout", __func__);

	/* Disable LCDC DMA and DMA Palette Loaded Interrupt. */
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	if (priv->rev == 1)
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_PL_INT_ENA);
	else
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG, LCDC_V2_PL_INT_ENA);
}

static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	tilcdc_clear_irqstatus(dev, 0xffffffff);

	if (priv->rev == 1) {
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA);
		tilcdc_set(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
			LCDC_V2_UNDERFLOW_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}

static void tilcdc_crtc_disable_irqs(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;

	/* disable irqs that we might have enabled: */
	if (priv->rev == 1) {
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
			LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
			LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
		tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
			LCDC_V1_END_OF_FRAME_INT_ENA);
	} else {
		tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
			LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
			LCDC_V2_END_OF_FRAME0_INT_ENA |
			LCDC_FRAME_DONE | LCDC_SYNC_LOST);
	}
}

static void reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;

	if (priv->rev != 2)
		return;

	tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
	usleep_range(250, 1000);
	tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
}

/*
 * Calculate the percentage difference between the requested pixel clock rate
 * and the effective rate resulting from calculating the clock divider value.
 */
static unsigned int tilcdc_pclk_diff(unsigned long rate,
				     unsigned long real_rate)
{
	int r = rate / 100, rr = real_rate / 100;

	return (unsigned int)(abs(((rr - r) * 100) / r));
}

static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long clk_rate, real_rate, req_rate;
	unsigned int clkdiv;
	int ret;

	clkdiv = 2; /* first try using a standard divider of 2 */

	/* mode.clock is in KHz, set_rate wants parameter in Hz */
	req_rate = crtc->mode.clock * 1000;

	ret = clk_set_rate(priv->clk, req_rate * clkdiv);
	clk_rate = clk_get_rate(priv->clk);
	if (ret < 0) {
		/*
		 * If we fail to set the clock rate (some architectures don't
		 * use the common clock framework yet and may not implement
		 * all the clk API calls for every clock), try the next best
		 * thing: adjusting the clock divider, unless clk_get_rate()
		 * failed as well.
		 */
		if (!clk_rate) {
			/* Nothing more we can do. Just bail out. */
			dev_err(dev->dev,
				"failed to set the pixel clock - unable to read current lcdc clock rate\n");
			return;
		}

		clkdiv = DIV_ROUND_CLOSEST(clk_rate, req_rate);

		/*
		 * Emit a warning if the real clock rate resulting from the
		 * calculated divider differs much from the requested rate.
		 *
		 * 5% is an arbitrary value - LCDs are usually quite tolerant
		 * about pixel clock rates.
		 */
		real_rate = clkdiv * req_rate;

		if (tilcdc_pclk_diff(clk_rate, real_rate) > 5) {
			dev_warn(dev->dev,
				 "effective pixel clock rate (%luHz) differs from the calculated rate (%luHz)\n",
				 clk_rate, real_rate);
		}
	}

	tilcdc_crtc->lcd_fck_rate = clk_rate;

	DBG("lcd_clk=%u, mode clock=%d, div=%u",
	    tilcdc_crtc->lcd_fck_rate, crtc->mode.clock, clkdiv);

	/* Configure the LCD clock divisor. */
	tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(clkdiv) |
		     LCDC_RASTER_MODE);

	if (priv->rev == 2)
		tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
			   LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
			   LCDC_V2_CORE_CLK_EN);
}

static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	const struct tilcdc_panel_info *info = tilcdc_crtc->info;
	uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct drm_framebuffer *fb = crtc->primary->state->fb;

	if (WARN_ON(!info))
		return;

	if (WARN_ON(!fb))
		return;

	/* Configure the Burst Size and fifo threshold of DMA: */
	reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
	switch (info->dma_burst_sz) {
	case 1:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
		break;
	case 2:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
		break;
	case 4:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
		break;
	case 8:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
		break;
	case 16:
		reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
		break;
	default:
		dev_err(dev->dev, "invalid burst size\n");
		return;
	}
	reg |= (info->fifo_th << 8);
	tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);

	/* Configure timings: */
	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
	    mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);

	/* Set AC Bias Period and Number of Transitions per Interrupt: */
	reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
	reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
		LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);

	/*
	 * subtract one from hfp, hbp, hsw because the hardware uses
	 * a value of 0 as 1
	 */
	if (priv->rev == 2) {
		/* clear bits we're going to set */
		reg &= ~0x78000033;
		reg |= ((hfp-1) & 0x300) >> 8;
		reg |= ((hbp-1) & 0x300) >> 4;
		reg |= ((hsw-1) & 0x3c0) << 21;
	}
	tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);

	reg = (((mode->hdisplay >> 4) - 1) << 4) |
		(((hbp-1) & 0xff) << 24) |
		(((hfp-1) & 0xff) << 16) |
		(((hsw-1) & 0x3f) << 10);
	if (priv->rev == 2)
		reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
	tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);

	reg = ((mode->vdisplay - 1) & 0x3ff) |
		((vbp & 0xff) << 24) |
		((vfp & 0xff) << 16) |
		(((vsw-1) & 0x3f) << 10);
	tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);

	/*
	 * be sure to set Bit 10 for the V2 LCDC controller,
	 * otherwise the mode is limited to 1024 lines, preventing
	 * 1920x1080 from being supported.
	 */
	if (priv->rev == 2) {
		if ((mode->vdisplay - 1) & 0x400) {
			tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		} else {
			tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG,
				LCDC_LPP_B10);
		}
	}

	/* Configure display type: */
	reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
		~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
		  LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK |
		  0x000ff000 /* Palette Loading Delay bits */);
	reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
	if (info->tft_alt_mode)
		reg |= LCDC_TFT_ALT_ENABLE;
	if (priv->rev == 2) {
		switch (fb->format->format) {
		case DRM_FORMAT_BGR565:
		case DRM_FORMAT_RGB565:
			break;
		case DRM_FORMAT_XBGR8888:
		case DRM_FORMAT_XRGB8888:
			reg |= LCDC_V2_TFT_24BPP_UNPACK;
			/* fallthrough */
		case DRM_FORMAT_BGR888:
		case DRM_FORMAT_RGB888:
			reg |= LCDC_V2_TFT_24BPP_MODE;
			break;
		default:
			dev_err(dev->dev, "invalid pixel format\n");
			return;
		}
	}
	reg |= info->fdd << 12;
	tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);

	if (info->invert_pxl_clk)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);

	if (info->sync_ctrl)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);

	if (info->sync_edge)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);

	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
	else
		tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);

	if (info->raster_order)
		tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
	else
		tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);

	tilcdc_crtc_set_clk(crtc);

	tilcdc_crtc_load_palette(crtc);

	set_scanout(crtc, fb);

	drm_framebuffer_reference(fb);

	crtc->hwmode = crtc->state->adjusted_mode;
}

static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	unsigned long flags;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
	mutex_lock(&tilcdc_crtc->enable_lock);
	if (tilcdc_crtc->enabled || tilcdc_crtc->shutdown) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}

	pm_runtime_get_sync(dev->dev);

	reset(crtc);

	tilcdc_crtc_set_mode(crtc);

	tilcdc_crtc_enable_irqs(dev);

	tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
	tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
			  LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
			  LCDC_PALETTE_LOAD_MODE_MASK);

	/* There is no real chance for a race here as the time stamp
	 * is taken before the raster DMA is started. The spin-lock is
	 * taken to have a memory barrier after taking the time-stamp
	 * and to avoid a context switch between taking the stamp and
	 * enabling the raster.
	 */
	spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
	tilcdc_crtc->last_vblank = ktime_get();
	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
	spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

	drm_crtc_vblank_on(crtc);

	tilcdc_crtc->enabled = true;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}

static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int ret;

	mutex_lock(&tilcdc_crtc->enable_lock);
	if (shutdown)
		tilcdc_crtc->shutdown = true;
	if (!tilcdc_crtc->enabled) {
		mutex_unlock(&tilcdc_crtc->enable_lock);
		return;
	}
	tilcdc_crtc->frame_done = false;
	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);

	/*
	 * Wait for the framedone irq, which will still arrive before
	 * putting things to sleep.
	 */
	ret = wait_event_timeout(tilcdc_crtc->frame_done_wq,
				 tilcdc_crtc->frame_done,
				 msecs_to_jiffies(500));
	if (ret == 0)
		dev_err(dev->dev, "%s: timeout waiting for framedone\n",
			__func__);

	drm_crtc_vblank_off(crtc);

	tilcdc_crtc_disable_irqs(dev);

	pm_runtime_put_sync(dev->dev);

	if (tilcdc_crtc->next_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->next_fb);
		tilcdc_crtc->next_fb = NULL;
	}

	if (tilcdc_crtc->curr_fb) {
		drm_flip_work_queue(&tilcdc_crtc->unref_work,
				    tilcdc_crtc->curr_fb);
		tilcdc_crtc->curr_fb = NULL;
	}

	drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

	tilcdc_crtc->enabled = false;
	mutex_unlock(&tilcdc_crtc->enable_lock);
}

static void tilcdc_crtc_disable(struct drm_crtc *crtc)
{
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
	tilcdc_crtc_off(crtc, false);
}

void tilcdc_crtc_shutdown(struct drm_crtc *crtc)
{
	tilcdc_crtc_off(crtc, true);
}

static bool tilcdc_crtc_is_on(struct drm_crtc *crtc)
{
	return crtc->state && crtc->state->enable && crtc->state->active;
}

static void tilcdc_crtc_recover_work(struct work_struct *work)
{
	struct tilcdc_crtc *tilcdc_crtc =
		container_of(work, struct tilcdc_crtc, recover_work);
	struct drm_crtc *crtc = &tilcdc_crtc->base;

	dev_info(crtc->dev->dev, "%s: Reset CRTC", __func__);

	drm_modeset_lock_crtc(crtc, NULL);

	if (!tilcdc_crtc_is_on(crtc))
		goto out;

	tilcdc_crtc_disable(crtc);
	tilcdc_crtc_enable(crtc);
out:
	drm_modeset_unlock_crtc(crtc);
}

static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;

	drm_modeset_lock_crtc(crtc, NULL);
	tilcdc_crtc_disable(crtc);
	drm_modeset_unlock_crtc(crtc);

	flush_workqueue(priv->wq);

	of_node_put(crtc->port);
	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}

int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	if (tilcdc_crtc->event) {
		dev_err(dev->dev, "already pending page flip!\n");
		return -EBUSY;
	}

	drm_framebuffer_reference(fb);

	crtc->primary->fb = fb;
	tilcdc_crtc->event = event;

	mutex_lock(&tilcdc_crtc->enable_lock);

	if (tilcdc_crtc->enabled) {
		unsigned long flags;
		ktime_t next_vblank;
		s64 tdiff;

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
					   1000000 / crtc->hwmode.vrefresh);
		tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

		if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
			tilcdc_crtc->next_fb = fb;
		else
			set_scanout(crtc, fb);

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
	}

	mutex_unlock(&tilcdc_crtc->enable_lock);

	return 0;
}

static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	if (!tilcdc_crtc->simulate_vesa_sync)
		return true;

	/*
	 * tilcdc does not generate VESA-compliant sync but aligns
	 * VS on the second edge of HS instead of the first edge.
	 * We use adjusted_mode to fix up the sync by aligning both
	 * rising edges and adding the HSKEW offset.
	 */
	adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
	adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;

	if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
		adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
		adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
	} else {
		adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
		adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
	}

	return true;
}

static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
				    struct drm_crtc_state *state)
{
	struct drm_display_mode *mode = &state->mode;
	int ret;

	/* If we are not active we don't care */
	if (!state->active)
		return 0;

	if (state->state->planes[0].ptr != crtc->primary ||
	    state->state->planes[0].state == NULL ||
	    state->state->planes[0].state->crtc != crtc) {
		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
		return -EINVAL;
	}

	ret = tilcdc_crtc_mode_valid(crtc, mode);
	if (ret) {
		dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
		return -EINVAL;
	}

	return 0;
}

static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
	.destroy = tilcdc_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
	.mode_fixup = tilcdc_crtc_mode_fixup,
	.enable = tilcdc_crtc_enable,
	.disable = tilcdc_crtc_disable,
	.atomic_check = tilcdc_crtc_atomic_check,
};

int tilcdc_crtc_max_width(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	int max_width = 0;

	if (priv->rev == 1)
		max_width = 1024;
	else if (priv->rev == 2)
		max_width = 2048;

	return max_width;
}

int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = crtc->dev->dev_private;
	unsigned int bandwidth;
	uint32_t hbp, hfp, hsw, vbp, vfp, vsw;

	/*
	 * check to see if the width is within the range that
	 * the LCD Controller physically supports
	 */
	if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
		return MODE_VIRTUAL_X;

	/* width must be multiple of 16 */
	if (mode->hdisplay & 0xf)
		return MODE_VIRTUAL_X;

	if (mode->vdisplay > 2048)
		return MODE_VIRTUAL_Y;

	DBG("Processing mode %dx%d@%d with pixel clock %d",
	    mode->hdisplay, mode->vdisplay,
	    drm_mode_vrefresh(mode), mode->clock);

	hbp = mode->htotal - mode->hsync_end;
	hfp = mode->hsync_start - mode->hdisplay;
	hsw = mode->hsync_end - mode->hsync_start;
	vbp = mode->vtotal - mode->vsync_end;
	vfp = mode->vsync_start - mode->vdisplay;
	vsw = mode->vsync_end - mode->vsync_start;

	if ((hbp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Back Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hfp-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Front Porch out of range");
		return MODE_HBLANK_WIDE;
	}

	if ((hsw-1) & ~0x3ff) {
		DBG("Pruning mode: Horizontal Sync Width out of range");
		return MODE_HSYNC_WIDE;
	}

	if (vbp & ~0xff) {
		DBG("Pruning mode: Vertical Back Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if (vfp & ~0xff) {
		DBG("Pruning mode: Vertical Front Porch out of range");
		return MODE_VBLANK_WIDE;
	}

	if ((vsw-1) & ~0x3f) {
		DBG("Pruning mode: Vertical Sync Width out of range");
		return MODE_VSYNC_WIDE;
	}

	/*
	 * some devices have a maximum allowed pixel clock
	 * configured from the DT
	 */
	if (mode->clock > priv->max_pixelclock) {
		DBG("Pruning mode: pixel clock too high");
		return MODE_CLOCK_HIGH;
	}

	/*
	 * some devices further limit the max horizontal resolution
	 * configured from the DT
	 */
	if (mode->hdisplay > priv->max_width)
		return MODE_BAD_WIDTH;

	/* filter out modes that would require too much memory bandwidth: */
	bandwidth = mode->hdisplay * mode->vdisplay *
		    drm_mode_vrefresh(mode);
	if (bandwidth > priv->max_bandwidth) {
		DBG("Pruning mode: exceeds defined bandwidth limit");
		return MODE_BAD;
	}

	return MODE_OK;
}

void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
		const struct tilcdc_panel_info *info)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	tilcdc_crtc->info = info;
}

void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
					bool simulate_vesa_sync)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	tilcdc_crtc->simulate_vesa_sync = simulate_vesa_sync;
}

void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);

	drm_modeset_lock_crtc(crtc, NULL);
	if (tilcdc_crtc->lcd_fck_rate != clk_get_rate(priv->clk)) {
		if (tilcdc_crtc_is_on(crtc)) {
			pm_runtime_get_sync(dev->dev);
			tilcdc_crtc_disable(crtc);

			tilcdc_crtc_set_clk(crtc);

			tilcdc_crtc_enable(crtc);
			pm_runtime_put_sync(dev->dev);
		}
	}
	drm_modeset_unlock_crtc(crtc);
}

#define SYNC_LOST_COUNT_LIMIT 50

irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
{
	struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct tilcdc_drm_private *priv = dev->dev_private;
	uint32_t stat, reg;

	stat = tilcdc_read_irqstatus(dev);
	tilcdc_clear_irqstatus(dev, stat);

	if (stat & LCDC_END_OF_FRAME0) {
		unsigned long flags;
		bool skip_event = false;
		ktime_t now;

		now = ktime_get();

		drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);

		spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

		tilcdc_crtc->last_vblank = now;

		if (tilcdc_crtc->next_fb) {
			set_scanout(crtc, tilcdc_crtc->next_fb);
			tilcdc_crtc->next_fb = NULL;
			skip_event = true;
		}

		spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

		drm_crtc_handle_vblank(crtc);

		if (!skip_event) {
			struct drm_pending_vblank_event *event;

			spin_lock_irqsave(&dev->event_lock, flags);

			event = tilcdc_crtc->event;
			tilcdc_crtc->event = NULL;
			if (event)
				drm_crtc_send_vblank_event(crtc, event);

			spin_unlock_irqrestore(&dev->event_lock, flags);
		}

		if (tilcdc_crtc->frame_intact)
			tilcdc_crtc->sync_lost_count = 0;
		else
			tilcdc_crtc->frame_intact = true;
	}

	if (stat & LCDC_FIFO_UNDERFLOW)
		dev_err_ratelimited(dev->dev, "%s(0x%08x): FIFO underflow",
				    __func__, stat);

	if (stat & LCDC_PL_LOAD_DONE) {
		complete(&tilcdc_crtc->palette_loaded);
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_PL_INT_ENA);
		else
			tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
				     LCDC_V2_PL_INT_ENA);
	}

	if (stat & LCDC_SYNC_LOST) {
		dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
				    __func__, stat);
		tilcdc_crtc->frame_intact = false;
		if (priv->rev == 1) {
			reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
			if (reg & LCDC_RASTER_ENABLE) {
				tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
					     LCDC_RASTER_ENABLE);
				tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
					   LCDC_RASTER_ENABLE);
			}
		} else {
			if (tilcdc_crtc->sync_lost_count++ >
			    SYNC_LOST_COUNT_LIMIT) {
				dev_err(dev->dev,
					"%s(0x%08x): Sync lost flood detected, recovering",
					__func__, stat);
				queue_work(system_wq,
					   &tilcdc_crtc->recover_work);
				tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
					     LCDC_SYNC_LOST);
				tilcdc_crtc->sync_lost_count = 0;
			}
		}
	}

	if (stat & LCDC_FRAME_DONE) {
		tilcdc_crtc->frame_done = true;
		wake_up(&tilcdc_crtc->frame_done_wq);
		/* rev 1 lcdc appears to hang if irq is not disabled here */
		if (priv->rev == 1)
			tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
				     LCDC_V1_FRAME_DONE_INT_ENA);
	}

	/* For revision 2 only */
	if (priv->rev == 2) {
		/* Indicate to LCDC that the interrupt service routine has
		 * completed, see 13.3.6.1.6 in AM335x TRM.
		 */
		tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
	}

	return IRQ_HANDLED;
}

int tilcdc_crtc_create(struct drm_device *dev)
{
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct tilcdc_crtc *tilcdc_crtc;
	struct drm_crtc *crtc;
	int ret;

	tilcdc_crtc = devm_kzalloc(dev->dev, sizeof(*tilcdc_crtc), GFP_KERNEL);
	if (!tilcdc_crtc) {
		dev_err(dev->dev, "allocation failed\n");
		return -ENOMEM;
	}

	init_completion(&tilcdc_crtc->palette_loaded);
	tilcdc_crtc->palette_base = dmam_alloc_coherent(dev->dev,
					TILCDC_PALETTE_SIZE,
					&tilcdc_crtc->palette_dma_handle,
					GFP_KERNEL | __GFP_ZERO);
	if (!tilcdc_crtc->palette_base)
		return -ENOMEM;
	*tilcdc_crtc->palette_base = TILCDC_PALETTE_FIRST_ENTRY;

	crtc = &tilcdc_crtc->base;

	ret = tilcdc_plane_init(dev, &tilcdc_crtc->primary);
	if (ret < 0)
		goto fail;

	mutex_init(&tilcdc_crtc->enable_lock);

	init_waitqueue_head(&tilcdc_crtc->frame_done_wq);

	drm_flip_work_init(&tilcdc_crtc->unref_work,
			"unref", unref_worker);

	spin_lock_init(&tilcdc_crtc->irq_lock);
	INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);

	ret = drm_crtc_init_with_planes(dev, crtc,
					&tilcdc_crtc->primary,
					NULL,
					&tilcdc_crtc_funcs,
					"tilcdc crtc");
	if (ret < 0)
		goto fail;

	drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);

	if (priv->is_componentized) {
		struct device_node *ports =
			of_get_child_by_name(dev->dev->of_node, "ports");

		if (ports) {
			crtc->port = of_get_child_by_name(ports, "port");
			of_node_put(ports);
		} else {
			crtc->port =
				of_get_child_by_name(dev->dev->of_node, "port");
		}
		if (!crtc->port) { /* This should never happen */
			dev_err(dev->dev, "Port node not found in %s\n",
				dev->dev->of_node->full_name);
			ret = -EINVAL;
			goto fail;
		}
	}

	priv->crtc = crtc;
	return 0;

fail:
	tilcdc_crtc_destroy(crtc);
	return ret;
}