/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 CRTC module
 *
 * In VC4, the Pixel Valve is what most closely corresponds to the
 * DRM's concept of a CRTC. The PV generates video timings from the
 * output's clock plus its configuration. It pulls scaled pixels from
 * the HVS at that timing, and feeds it to the encoder.
 *
 * However, the DRM CRTC also collects the configuration of all the
 * DRM planes attached to it. As a result, this file also manages
 * setup of the VC4 HVS's display elements on the CRTC.
 *
 * The 2835 has 3 different pixel valves. pv0 in the audio power
 * domain feeds DSI0 or DPI, while pv1 feeds DS1 or SMI. pv2 in the
 * image domain can feed either HDMI or the SDTV controller. The
 * pixel valve chooses from the CPRMAN clocks (HSM for HDMI, VEC for
 * SDTV, etc.) according to which output type is chosen in the mux.
 *
 * For power management, the pixel valve's registers are all clocked
 * by the AXI clock, while the timings and FIFOs make use of the
 * output-specific clock. Since the encoders also directly consume
 * the CPRMAN clocks, and know what timings they need, they are the
 * ones that set the clock.
 */

#include "drm_atomic.h"
#include "drm_atomic_helper.h"
#include "drm_crtc_helper.h"
#include "linux/clk.h"
#include "drm_fb_cma_helper.h"
#include "linux/component.h"
#include "linux/of_device.h"
#include "vc4_drv.h"
#include "vc4_regs.h"

/* Per-pixelvalve driver state, wrapping the core DRM CRTC object. */
struct vc4_crtc {
	struct drm_crtc base;
	/* Static description of this pixelvalve instance (HVS channel,
	 * which encoder types it can feed).
	 */
	const struct vc4_crtc_data *data;
	/* Mapped pixelvalve (PV) register block. */
	void __iomem *regs;

	/* Which HVS channel we're using for our CRTC.
	 */
	int channel;

	/* Pending pageflip completion event, delivered from the vblank
	 * interrupt; protected by dev->event_lock.
	 */
	struct drm_pending_vblank_event *event;
};

/* Per-CRTC atomic state: extends drm_crtc_state with the HVS display
 * list allocation for this configuration.
 */
struct vc4_crtc_state {
	struct drm_crtc_state base;
	/* Dlist area for this CRTC configuration. */
	struct drm_mm_node mm;
};

/* Downcast helper; vc4_crtc embeds drm_crtc as its first member. */
static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}

/* Downcast helper; vc4_crtc_state embeds drm_crtc_state first. */
static inline struct vc4_crtc_state *
to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
{
	return (struct vc4_crtc_state *)crtc_state;
}

/* Static, per-compatible description of one pixelvalve. */
struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;

	/* The two encoder types this PV can feed, selected by clock
	 * select 0/1 respectively (see vc4_set_crtc_possible_masks()).
	 */
	enum vc4_encoder_type encoder0_type;
	enum vc4_encoder_type encoder1_type;
};

/* PV register accessors; both expect a "vc4_crtc" local in scope. */
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))

/* Offset/name table of the PV registers, for the debug dumps below. */
#define CRTC_REG(reg) { reg, #reg }
static const struct {
	u32 reg;
	const char *name;
} crtc_regs[] = {
	CRTC_REG(PV_CONTROL),
	CRTC_REG(PV_V_CONTROL),
	CRTC_REG(PV_VSYNCD_EVEN),
	CRTC_REG(PV_HORZA),
	CRTC_REG(PV_HORZB),
	CRTC_REG(PV_VERTA),
	CRTC_REG(PV_VERTB),
	CRTC_REG(PV_VERTA_EVEN),
	CRTC_REG(PV_VERTB_EVEN),
	CRTC_REG(PV_INTEN),
	CRTC_REG(PV_INTSTAT),
	CRTC_REG(PV_STAT),
	CRTC_REG(PV_HACT_ACT),
};

/* Dumps all PV registers of this CRTC to the kernel log. */
static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
		DRM_INFO("0x%04x (%s): 0x%08x\n",
			 crtc_regs[i].reg, crtc_regs[i].name,
			 CRTC_READ(crtc_regs[i].reg));
	}
}

#ifdef CONFIG_DEBUG_FS
/* debugfs hook: prints the PV registers of the CRTC whose index was
 * stashed in the drm_info_node's data pointer at registration time.
 */
int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	int crtc_index = (uintptr_t)node->info_ent->data;
	struct drm_crtc *crtc;
	struct vc4_crtc *vc4_crtc;
	int i;

	/* Walk the CRTC list looking for the crtc_index'th entry. */
	i = 0;
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (i == crtc_index)
break; 129 i++; 130 } 131 if (!crtc) 132 return 0; 133 vc4_crtc = to_vc4_crtc(crtc); 134 135 for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) { 136 seq_printf(m, "%s (0x%04x): 0x%08x\n", 137 crtc_regs[i].name, crtc_regs[i].reg, 138 CRTC_READ(crtc_regs[i].reg)); 139 } 140 141 return 0; 142 } 143 #endif 144 145 static void vc4_crtc_destroy(struct drm_crtc *crtc) 146 { 147 drm_crtc_cleanup(crtc); 148 } 149 150 static u32 vc4_get_fifo_full_level(u32 format) 151 { 152 static const u32 fifo_len_bytes = 64; 153 static const u32 hvs_latency_pix = 6; 154 155 switch (format) { 156 case PV_CONTROL_FORMAT_DSIV_16: 157 case PV_CONTROL_FORMAT_DSIC_16: 158 return fifo_len_bytes - 2 * hvs_latency_pix; 159 case PV_CONTROL_FORMAT_DSIV_18: 160 return fifo_len_bytes - 14; 161 case PV_CONTROL_FORMAT_24: 162 case PV_CONTROL_FORMAT_DSIV_24: 163 default: 164 return fifo_len_bytes - 3 * hvs_latency_pix; 165 } 166 } 167 168 /* 169 * Returns the clock select bit for the connector attached to the 170 * CRTC. 171 */ 172 static int vc4_get_clock_select(struct drm_crtc *crtc) 173 { 174 struct drm_connector *connector; 175 176 drm_for_each_connector(connector, crtc->dev) { 177 if (connector->state->crtc == crtc) { 178 struct drm_encoder *encoder = connector->encoder; 179 struct vc4_encoder *vc4_encoder = 180 to_vc4_encoder(encoder); 181 182 return vc4_encoder->clock_select; 183 } 184 } 185 186 return -1; 187 } 188 189 static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) 190 { 191 struct drm_device *dev = crtc->dev; 192 struct vc4_dev *vc4 = to_vc4_dev(dev); 193 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 194 struct drm_crtc_state *state = crtc->state; 195 struct drm_display_mode *mode = &state->adjusted_mode; 196 bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; 197 u32 vactive = (mode->vdisplay >> (interlace ? 
			       1 : 0));
	u32 format = PV_CONTROL_FORMAT_24;
	bool debug_dump_regs = false;
	int clock_select = vc4_get_clock_select(crtc);

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
		vc4_crtc_dump_regs(vc4_crtc);
	}

	/* Reset the PV fifo. */
	CRTC_WRITE(PV_CONTROL, 0);
	CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN);
	CRTC_WRITE(PV_CONTROL, 0);

	/* Horizontal timings: back porch, sync width, front porch and
	 * active width, all derived from the mode's htotal/hsync/hdisplay.
	 */
	CRTC_WRITE(PV_HORZA,
		   VC4_SET_FIELD(mode->htotal - mode->hsync_end,
				 PV_HORZA_HBP) |
		   VC4_SET_FIELD(mode->hsync_end - mode->hsync_start,
				 PV_HORZA_HSYNC));
	CRTC_WRITE(PV_HORZB,
		   VC4_SET_FIELD(mode->hsync_start - mode->hdisplay,
				 PV_HORZB_HFP) |
		   VC4_SET_FIELD(mode->hdisplay, PV_HORZB_HACTIVE));

	/* Vertical timings; vactive was halved above for interlace. */
	CRTC_WRITE(PV_VERTA,
		   VC4_SET_FIELD(mode->vtotal - mode->vsync_end,
				 PV_VERTA_VBP) |
		   VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
				 PV_VERTA_VSYNC));
	CRTC_WRITE(PV_VERTB,
		   VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
				 PV_VERTB_VFP) |
		   VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));

	if (interlace) {
		/* Even-field timings: one fewer back-porch line than
		 * the odd field.
		 */
		CRTC_WRITE(PV_VERTA_EVEN,
			   VC4_SET_FIELD(mode->vtotal - mode->vsync_end - 1,
					 PV_VERTA_VBP) |
			   VC4_SET_FIELD(mode->vsync_end - mode->vsync_start,
					 PV_VERTA_VSYNC));
		CRTC_WRITE(PV_VERTB_EVEN,
			   VC4_SET_FIELD(mode->vsync_start - mode->vdisplay,
					 PV_VERTB_VFP) |
			   VC4_SET_FIELD(vactive, PV_VERTB_VACTIVE));
	}

	CRTC_WRITE(PV_HACT_ACT, mode->hdisplay);

	CRTC_WRITE(PV_V_CONTROL,
		   PV_VCONTROL_CONTINUOUS |
		   (interlace ? PV_VCONTROL_INTERLACE : 0));

	/* Final PV_CONTROL write selects the pixel format, FIFO fill
	 * level and output clock mux, and enables the PV.
	 */
	CRTC_WRITE(PV_CONTROL,
		   VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
		   VC4_SET_FIELD(vc4_get_fifo_full_level(format),
				 PV_CONTROL_FIFO_LEVEL) |
		   PV_CONTROL_CLR_AT_START |
		   PV_CONTROL_TRIGGER_UNDERFLOW |
		   PV_CONTROL_WAIT_HSTART |
		   VC4_SET_FIELD(clock_select, PV_CONTROL_CLK_SELECT) |
		   PV_CONTROL_FIFO_CLR |
		   PV_CONTROL_EN);

	HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
		  SCALER_DISPBKGND_AUTOHS |
		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
		vc4_crtc_dump_regs(vc4_crtc);
	}
}

/* Sanity check that the HVS scaler is up before we touch a channel. */
static void require_hvs_enabled(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) !=
		     SCALER_DISPCTRL_ENABLE);
}

/* Shuts down the pixelvalve's video output and resets its HVS channel. */
static void vc4_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	u32 chan = vc4_crtc->channel;
	int ret;

	require_hvs_enabled(dev);

	/* Stop the PV's video output, then wait for it to report the
	 * enable bit clear (timeout arg presumably in ms — see
	 * wait_for() in vc4_drv.h; TODO confirm).
	 */
	CRTC_WRITE(PV_V_CONTROL,
		   CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN);
	ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1);
	WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n");

	if (HVS_READ(SCALER_DISPCTRLX(chan)) &
	    SCALER_DISPCTRLX_ENABLE) {
		HVS_WRITE(SCALER_DISPCTRLX(chan),
			  SCALER_DISPCTRLX_RESET);

		/* While the docs say that reset is self-clearing, it
		 * seems it doesn't actually.
		 */
		HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
	}

	/* Once we leave, the scaler should be disabled and its fifo empty.
	 */

	WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);

	WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
				   SCALER_DISPSTATX_MODE) !=
		     SCALER_DISPSTATX_MODE_DISABLED);

	WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
		      (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
		     SCALER_DISPSTATX_EMPTY);
}

/* Brings up the HVS channel for this CRTC, then the PV's video output. */
static void vc4_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct drm_crtc_state *state = crtc->state;
	struct drm_display_mode *mode = &state->adjusted_mode;

	require_hvs_enabled(dev);

	/* Turn on the scaler, which will wait for vstart to start
	 * compositing.
	 */
	HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel),
		  VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) |
		  VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) |
		  SCALER_DISPCTRLX_ENABLE);

	/* Turn on the pixel valve, which will emit the vstart signal. */
	CRTC_WRITE(PV_V_CONTROL,
		   CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
}

/* Atomic check: validates the proposed CRTC state and pre-allocates
 * the HVS display list space it will need, sized by summing the dlist
 * contribution of each active plane.  The allocation is recorded in
 * the vc4_crtc_state and released in vc4_crtc_destroy_state().
 */
static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_crtc_state *state)
{
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane;
	unsigned long flags;
	u32 dlist_count = 0;
	int ret;

	/* The pixelvalve can only feed one encoder (and encoders are
	 * 1:1 with connectors.)
	 */
	if (hweight32(state->connector_mask) > 1)
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *plane_state =
			state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!plane_state)
			plane_state = plane->state;

		dlist_count += vc4_plane_dlist_size(plane_state);
	}

	dlist_count++; /* Account for SCALER_CTL0_END. */

	/* mm_lock also guards against the vblank-time dlist updates. */
	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
				 dlist_count, 1, 0);
	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
	if (ret)
		return ret;

	return 0;
}

/* Atomic flush: writes the active planes' display list words into the
 * HVS dlist area reserved in atomic_check, points the hardware at it,
 * and arms the pageflip completion event for the vblank IRQ.
 */
static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_crtc_state *old_state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	struct drm_plane *plane;
	bool debug_dump_regs = false;
	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
	u32 __iomem *dlist_next = dlist_start;

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(dev);
	}

	/* Copy all the active planes' dlist contents to the hardware dlist.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		dlist_next += vc4_plane_write_dlist(plane, dlist_next);
	}

	/* Terminate the list, and sanity-check it against the size we
	 * reserved in atomic_check.
	 */
	writel(SCALER_CTL0_END, dlist_next);
	dlist_next++;

	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);

	HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
		  vc4_state->mm.start);

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(dev);
	}

	if (crtc->state->event) {
		unsigned long flags;

		crtc->state->event->pipe = drm_crtc_index(crtc);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		/* Hand the event to the vblank IRQ path; event_lock
		 * serializes against vc4_crtc_handle_page_flip().
		 */
		spin_lock_irqsave(&dev->event_lock, flags);
		vc4_crtc->event = crtc->state->event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
		crtc->state->event = NULL;
	}
}

/* Unmasks the PV's VFP-start interrupt, which drives our vblank
 * reporting (see vc4_crtc_irq_handler()).
 */
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];

	CRTC_WRITE(PV_INTEN, PV_INT_VFP_START);

	return 0;
}

/* Masks all PV interrupts again. */
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];

	CRTC_WRITE(PV_INTEN, 0);
}

/* Sends any pending pageflip completion event; called from the vblank
 * interrupt with the event consumed under dev->event_lock.
 */
static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (vc4_crtc->event) {
		drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
		vc4_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/* PV interrupt handler: on VFP start, ack the interrupt, report the
 * vblank to the DRM core and deliver any pending flip event.
 */
static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
{
	struct vc4_crtc *vc4_crtc = data;
	u32 stat = CRTC_READ(PV_INTSTAT);
	irqreturn_t ret = IRQ_NONE;

	if (stat & PV_INT_VFP_START) {
		CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
		drm_crtc_handle_vblank(&vc4_crtc->base);
		vc4_crtc_handle_page_flip(vc4_crtc);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/* Deferred state for an async pageflip, completed from the V3D seqno
 * callback once rendering to the new framebuffer is done.
 */
struct vc4_async_flip_state {
	struct drm_crtc *crtc;
	struct drm_framebuffer *fb;
	struct drm_pending_vblank_event *event;

	struct vc4_seqno_cb cb;
};

/* Called when the V3D execution for the BO being flipped to is done, so that
 * we can actually update the plane's address to point to it.
 */
static void
vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
{
	struct vc4_async_flip_state *flip_state =
		container_of(cb, struct vc4_async_flip_state, cb);
	struct drm_crtc *crtc = flip_state->crtc;
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane = crtc->primary;

	vc4_plane_async_set_fb(plane, flip_state->fb);
	if (flip_state->event) {
		unsigned long flags;

		spin_lock_irqsave(&dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, flip_state->event);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* Drop the reference taken in vc4_async_page_flip() and release
	 * the modeset semaphore acquired there.
	 */
	drm_framebuffer_unreference(flip_state->fb);
	kfree(flip_state);

	up(&vc4->async_modeset);
}

/* Implements async (non-vblank-synced) page flips.
 *
 * The page flip ioctl needs to return immediately, so we grab the
 * modeset semaphore on the pipe, and queue the address update for
 * when V3D is done with the BO being flipped to.
520 */ 521 static int vc4_async_page_flip(struct drm_crtc *crtc, 522 struct drm_framebuffer *fb, 523 struct drm_pending_vblank_event *event, 524 uint32_t flags) 525 { 526 struct drm_device *dev = crtc->dev; 527 struct vc4_dev *vc4 = to_vc4_dev(dev); 528 struct drm_plane *plane = crtc->primary; 529 int ret = 0; 530 struct vc4_async_flip_state *flip_state; 531 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); 532 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); 533 534 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); 535 if (!flip_state) 536 return -ENOMEM; 537 538 drm_framebuffer_reference(fb); 539 flip_state->fb = fb; 540 flip_state->crtc = crtc; 541 flip_state->event = event; 542 543 /* Make sure all other async modesetes have landed. */ 544 ret = down_interruptible(&vc4->async_modeset); 545 if (ret) { 546 drm_framebuffer_unreference(fb); 547 kfree(flip_state); 548 return ret; 549 } 550 551 /* Immediately update the plane's legacy fb pointer, so that later 552 * modeset prep sees the state that will be present when the semaphore 553 * is released. 554 */ 555 drm_atomic_set_fb_for_plane(plane->state, fb); 556 plane->fb = fb; 557 558 vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno, 559 vc4_async_page_flip_complete); 560 561 /* Driver takes ownership of state on successful async commit. 
*/ 562 return 0; 563 } 564 565 static int vc4_page_flip(struct drm_crtc *crtc, 566 struct drm_framebuffer *fb, 567 struct drm_pending_vblank_event *event, 568 uint32_t flags) 569 { 570 if (flags & DRM_MODE_PAGE_FLIP_ASYNC) 571 return vc4_async_page_flip(crtc, fb, event, flags); 572 else 573 return drm_atomic_helper_page_flip(crtc, fb, event, flags); 574 } 575 576 static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) 577 { 578 struct vc4_crtc_state *vc4_state; 579 580 vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL); 581 if (!vc4_state) 582 return NULL; 583 584 __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); 585 return &vc4_state->base; 586 } 587 588 static void vc4_crtc_destroy_state(struct drm_crtc *crtc, 589 struct drm_crtc_state *state) 590 { 591 struct vc4_dev *vc4 = to_vc4_dev(crtc->dev); 592 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 593 594 if (vc4_state->mm.allocated) { 595 unsigned long flags; 596 597 spin_lock_irqsave(&vc4->hvs->mm_lock, flags); 598 drm_mm_remove_node(&vc4_state->mm); 599 spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); 600 601 } 602 603 __drm_atomic_helper_crtc_destroy_state(crtc, state); 604 } 605 606 static const struct drm_crtc_funcs vc4_crtc_funcs = { 607 .set_config = drm_atomic_helper_set_config, 608 .destroy = vc4_crtc_destroy, 609 .page_flip = vc4_page_flip, 610 .set_property = NULL, 611 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 612 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 613 .reset = drm_atomic_helper_crtc_reset, 614 .atomic_duplicate_state = vc4_crtc_duplicate_state, 615 .atomic_destroy_state = vc4_crtc_destroy_state, 616 }; 617 618 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { 619 .mode_set_nofb = vc4_crtc_mode_set_nofb, 620 .disable = vc4_crtc_disable, 621 .enable = vc4_crtc_enable, 622 .atomic_check = vc4_crtc_atomic_check, 623 .atomic_flush = vc4_crtc_atomic_flush, 624 }; 625 626 static const 
struct vc4_crtc_data pv0_data = {
	.hvs_channel = 0,
	.encoder0_type = VC4_ENCODER_TYPE_DSI0,
	.encoder1_type = VC4_ENCODER_TYPE_DPI,
};

static const struct vc4_crtc_data pv1_data = {
	.hvs_channel = 2,
	.encoder0_type = VC4_ENCODER_TYPE_DSI1,
	.encoder1_type = VC4_ENCODER_TYPE_SMI,
};

static const struct vc4_crtc_data pv2_data = {
	.hvs_channel = 1,
	.encoder0_type = VC4_ENCODER_TYPE_VEC,
	.encoder1_type = VC4_ENCODER_TYPE_HDMI,
};

static const struct of_device_id vc4_crtc_dt_match[] = {
	{ .compatible = "brcm,bcm2835-pixelvalve0", .data = &pv0_data },
	{ .compatible = "brcm,bcm2835-pixelvalve1", .data = &pv1_data },
	{ .compatible = "brcm,bcm2835-pixelvalve2", .data = &pv2_data },
	{}
};

/* Marks this CRTC as usable by the encoders its pixelvalve can be
 * muxed to, and records which clock-select value reaches each one.
 */
static void vc4_set_crtc_possible_masks(struct drm_device *drm,
					struct drm_crtc *crtc)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, drm) {
		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);

		if (vc4_encoder->type == vc4_crtc->data->encoder0_type) {
			vc4_encoder->clock_select = 0;
			encoder->possible_crtcs |= drm_crtc_mask(crtc);
		} else if (vc4_encoder->type == vc4_crtc->data->encoder1_type) {
			vc4_encoder->clock_select = 1;
			encoder->possible_crtcs |= drm_crtc_mask(crtc);
		}
	}
}

/* Component bind: maps the PV registers, registers the CRTC with its
 * primary/overlay/cursor planes, and hooks up the PV interrupt.
 */
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_crtc *vc4_crtc;
	struct drm_crtc *crtc;
	struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp;
	const struct of_device_id *match;
	int ret, i;

	vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
	if (!vc4_crtc)
		return -ENOMEM;
	crtc = &vc4_crtc->base;

	match = of_match_device(vc4_crtc_dt_match, dev);
	if (!match)
		return -ENODEV;
	vc4_crtc->data = match->data;

	vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(vc4_crtc->regs))
		return PTR_ERR(vc4_crtc->regs);

	/* For now, we create just the primary and the legacy cursor
	 * planes. We should be able to stack more planes on easily,
	 * but to do that we would need to compute the bandwidth
	 * requirement of the plane configuration, and reject ones
	 * that will take too much.
	 */
	primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
	if (IS_ERR(primary_plane)) {
		dev_err(dev, "failed to construct primary plane\n");
		ret = PTR_ERR(primary_plane);
		goto err;
	}

	drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
				  &vc4_crtc_funcs, NULL);
	drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs);
	primary_plane->crtc = crtc;
	vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
	vc4_crtc->channel = vc4_crtc->data->hvs_channel;

	/* Set up some arbitrary number of planes. We're not limited
	 * by a set number of physical registers, just the space in
	 * the HVS (16k) and how small an plane can be (28 bytes).
	 * However, each plane we set up takes up some memory, and
	 * increases the cost of looping over planes, which atomic
	 * modesetting does quite a bit. As a result, we pick a
	 * modest number of planes to expose, that should hopefully
	 * still cover any sane usecase.
	 */
	for (i = 0; i < 8; i++) {
		struct drm_plane *plane =
			vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);

		if (IS_ERR(plane))
			continue;

		plane->possible_crtcs = 1 << drm_crtc_index(crtc);
	}

	/* Set up the legacy cursor after overlay initialization,
	 * since we overlay planes on the CRTC in the order they were
	 * initialized.
	 */
	cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
	if (!IS_ERR(cursor_plane)) {
		cursor_plane->possible_crtcs = 1 << drm_crtc_index(crtc);
		cursor_plane->crtc = crtc;
		crtc->cursor = cursor_plane;
	}

	/* Mask and ack any stale PV interrupt before requesting it. */
	CRTC_WRITE(PV_INTEN, 0);
	CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
			       vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc);
	if (ret)
		goto err_destroy_planes;

	vc4_set_crtc_possible_masks(drm, crtc);

	platform_set_drvdata(pdev, vc4_crtc);

	return 0;

err_destroy_planes:
	/* NOTE(review): the crtc registered by
	 * drm_crtc_init_with_planes() above is not unwound on this
	 * path — confirm whether a drm_crtc_cleanup() call is needed
	 * here.
	 */
	list_for_each_entry_safe(destroy_plane, temp,
				 &drm->mode_config.plane_list, head) {
		if (destroy_plane->possible_crtcs == 1 << drm_crtc_index(crtc))
			destroy_plane->funcs->destroy(destroy_plane);
	}
err:
	return ret;
}

/* Component unbind: tears down the CRTC and masks its interrupt. */
static void vc4_crtc_unbind(struct device *dev, struct device *master,
			    void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev);

	vc4_crtc_destroy(&vc4_crtc->base);

	CRTC_WRITE(PV_INTEN, 0);

	platform_set_drvdata(pdev, NULL);
}

static const struct component_ops vc4_crtc_ops = {
	.bind = vc4_crtc_bind,
	.unbind = vc4_crtc_unbind,
};

static int vc4_crtc_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_crtc_ops);
}

static int vc4_crtc_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_crtc_ops);
	return 0;
}

struct platform_driver vc4_crtc_driver = {
	.probe = vc4_crtc_dev_probe,
	.remove = vc4_crtc_dev_remove,
	.driver = {
		.name = "vc4_crtc",
		.of_match_table = vc4_crtc_dt_match,
	},
};