// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>

#include "mdp4_kms.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit, used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}
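/*
 * Note: the pending bits requested above are consumed in
 * mdp4_crtc_vblank_irq() below: request_pending() ORs a bit into
 * ->pending and arms the vblank irq, and the irq handler atomic_xchg()s
 * the mask back to zero, completing the flip and/or committing cursor
 * unrefs.  A typical caller (see mdp4_crtc_atomic_flush()) looks like:
 *
 *	blend_setup(crtc);
 *	crtc_flush(crtc);
 *	request_pending(crtc, PENDING_FLIP);
 */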
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		mdp4_crtc->event = NULL;
		DBG("%s: send event: %p", mdp4_crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
	struct msm_kms *kms = &mdp4_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
	[VG1]  = 1,
	[VG2]  = 2,
	[RGB1] = 0,
	[RGB2] = 0,
	[RGB3] = 0,
	[VG3]  = 3,
	[VG4]  = 4,
};

/* setup mixer config, for which we need to consider all CRTCs and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
		STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->state->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}
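/*
 * Note on the STAGE_OP programming above: when the foreground plane's
 * format carries an alpha channel, both sides of the blend are driven
 * by the FG pixel alpha (with the BG contribution inverted), i.e. the
 * usual "FG over BG" equation:
 *
 *	out = FG * alpha(FG) + BG * (1 - alpha(FG))
 *
 * Otherwise constant alpha is used, with FG at 0xff and BG at 0x00,
 * which simply passes the foreground through opaquely.
 */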
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}

static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}
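/*
 * Note: the flush bits written by crtc_flush() in the atomic_flush path
 * above are latched by the hardware at vblank; mdp4_crtc_wait_for_flush_done()
 * (further below) waits for REG_MDP4_OVERLAY_FLUSH to clear ->flushed_mask
 * as its signal that the new configuration has actually hit the screen.
 */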
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint64_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width = width;
	mdp4_crtc->cursor.height = height;
	mdp4_crtc->cursor.stale = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_put_unlocked(cursor_bo);
	return ret;
}
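/*
 * Cursor updates are effectively double-buffered in software: the
 * cursor_set/cursor_move paths only stash the new state under
 * cursor.lock and mark it stale, and update_cursor() performs the
 * actual register writes from the vblank irq.  The reference on the
 * previous bo is dropped via drm_flip_work, presumably because
 * unpinning/unref'ing may sleep and so cannot be done directly from
 * irq context.
 */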
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};

static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank timeout, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
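/*
 * Accessor for this crtc's vblank irq mask; presumably consumed by the
 * kms-level vblank enable/disable hooks so they can program the shared
 * MDP interrupt registers for this crtc's DMA channel.
 */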
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTC to wait for
	 * other event.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
	"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
				  NULL);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	return crtc;
}
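/*
 * A minimal usage sketch of mdp4_crtc_init() from the kms modeset-init
 * side (variable names here are illustrative, not part of this file):
 *
 *	plane = mdp4_plane_init(dev, RGB2, true);
 *	crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_P);
 *	if (IS_ERR(crtc))
 *		return PTR_ERR(crtc);
 *	priv->crtcs[priv->num_crtcs++] = crtc;
 *
 * i.e. the caller supplies the primary plane, a crtc id, the overlay
 * (ovlp) index, and the DMA channel that this crtc scans out through.
 */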