1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2015 Broadcom 4 */ 5 6 /** 7 * DOC: VC4 CRTC module 8 * 9 * In VC4, the Pixel Valve is what most closely corresponds to the 10 * DRM's concept of a CRTC. The PV generates video timings from the 11 * encoder's clock plus its configuration. It pulls scaled pixels from 12 * the HVS at that timing, and feeds it to the encoder. 13 * 14 * However, the DRM CRTC also collects the configuration of all the 15 * DRM planes attached to it. As a result, the CRTC is also 16 * responsible for writing the display list for the HVS channel that 17 * the CRTC will use. 18 * 19 * The 2835 has 3 different pixel valves. pv0 in the audio power 20 * domain feeds DSI0 or DPI, while pv1 feeds DS1 or SMI. pv2 in the 21 * image domain can feed either HDMI or the SDTV controller. The 22 * pixel valve chooses from the CPRMAN clocks (HSM for HDMI, VEC for 23 * SDTV, etc.) according to which output type is chosen in the mux. 24 * 25 * For power management, the pixel valve's registers are all clocked 26 * by the AXI clock, while the timings and FIFOs make use of the 27 * output-specific clock. Since the encoders also directly consume 28 * the CPRMAN clocks, and know what timings they need, they are the 29 * ones that set the clock. 30 */ 31 32 #include <linux/clk.h> 33 #include <linux/component.h> 34 #include <linux/of_device.h> 35 36 #include <drm/drm_atomic.h> 37 #include <drm/drm_atomic_helper.h> 38 #include <drm/drm_atomic_uapi.h> 39 #include <drm/drm_fb_cma_helper.h> 40 #include <drm/drm_print.h> 41 #include <drm/drm_probe_helper.h> 42 #include <drm/drm_vblank.h> 43 44 #include "vc4_drv.h" 45 #include "vc4_regs.h" 46 47 #define HVS_FIFO_LATENCY_PIX 6 48 49 #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset)) 50 #define CRTC_READ(offset) readl(vc4_crtc->regs + (offset)) 51 52 static const struct debugfs_reg32 crtc_regs[] = { 53 VC4_REG32(PV_CONTROL), 54 VC4_REG32(PV_V_CONTROL), 55 VC4_REG32(PV_VSYNCD_EVEN), 56 VC4_REG32(PV_HORZA), 57 VC4_REG32(PV_HORZB), 58 VC4_REG32(PV_VERTA), 59 VC4_REG32(PV_VERTB), 60 VC4_REG32(PV_VERTA_EVEN), 61 VC4_REG32(PV_VERTB_EVEN), 62 VC4_REG32(PV_INTEN), 63 VC4_REG32(PV_INTSTAT), 64 VC4_REG32(PV_STAT), 65 VC4_REG32(PV_HACT_ACT), 66 }; 67 68 static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, 69 bool in_vblank_irq, 70 int *vpos, int *hpos, 71 ktime_t *stime, ktime_t *etime, 72 const struct drm_display_mode *mode) 73 { 74 struct drm_device *dev = crtc->dev; 75 struct vc4_dev *vc4 = to_vc4_dev(dev); 76 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 77 u32 val; 78 int fifo_lines; 79 int vblank_lines; 80 bool ret = false; 81 82 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 83 84 /* Get optional system timestamp before query. */ 85 if (stime) 86 *stime = ktime_get(); 87 88 /* 89 * Read vertical scanline which is currently composed for our 90 * pixelvalve by the HVS, and also the scaler status. 91 */ 92 val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel)); 93 94 /* Get optional system timestamp after query. */ 95 if (etime) 96 *etime = ktime_get(); 97 98 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 99 100 /* Vertical position of hvs composed scanline. */ 101 *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE); 102 *hpos = 0; 103 104 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 105 *vpos /= 2; 106 107 /* Use hpos to correct for field offset in interlaced mode. */ 108 if (VC4_GET_FIELD(val, SCALER_DISPSTATX_FRAME_COUNT) % 2) 109 *hpos += mode->crtc_htotal / 2; 110 } 111 112 /* This is the offset we need for translating hvs -> pv scanout pos. */ 113 fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay; 114 115 if (fifo_lines > 0) 116 ret = true; 117 118 /* HVS more than fifo_lines into frame for compositing? */ 119 if (*vpos > fifo_lines) { 120 /* 121 * We are in active scanout and can get some meaningful results 122 * from HVS. The actual PV scanout can not trail behind more 123 * than fifo_lines as that is the fifo's capacity. Assume that 124 * in active scanout the HVS and PV work in lockstep wrt. HVS 125 * refilling the fifo and PV consuming from the fifo, ie. 126 * whenever the PV consumes and frees up a scanline in the 127 * fifo, the HVS will immediately refill it, therefore 128 * incrementing vpos. Therefore we choose HVS read position - 129 * fifo size in scanlines as a estimate of the real scanout 130 * position of the PV. 131 */ 132 *vpos -= fifo_lines + 1; 133 134 return ret; 135 } 136 137 /* 138 * Less: This happens when we are in vblank and the HVS, after getting 139 * the VSTART restart signal from the PV, just started refilling its 140 * fifo with new lines from the top-most lines of the new framebuffers. 141 * The PV does not scan out in vblank, so does not remove lines from 142 * the fifo, so the fifo will be full quickly and the HVS has to pause. 143 * We can't get meaningful readings wrt. scanline position of the PV 144 * and need to make things up in a approximative but consistent way. 145 */ 146 vblank_lines = mode->vtotal - mode->vdisplay; 147 148 if (in_vblank_irq) { 149 /* 150 * Assume the irq handler got called close to first 151 * line of vblank, so PV has about a full vblank 152 * scanlines to go, and as a base timestamp use the 153 * one taken at entry into vblank irq handler, so it 154 * is not affected by random delays due to lock 155 * contention on event_lock or vblank_time lock in 156 * the core. 157 */ 158 *vpos = -vblank_lines; 159 160 if (stime) 161 *stime = vc4_crtc->t_vblank; 162 if (etime) 163 *etime = vc4_crtc->t_vblank; 164 165 /* 166 * If the HVS fifo is not yet full then we know for certain 167 * we are at the very beginning of vblank, as the hvs just 168 * started refilling, and the stime and etime timestamps 169 * truly correspond to start of vblank. 170 * 171 * Unfortunately there's no way to report this to upper levels 172 * and make it more useful. 173 */ 174 } else { 175 /* 176 * No clue where we are inside vblank. Return a vpos of zero, 177 * which will cause calling code to just return the etime 178 * timestamp uncorrected. At least this is no worse than the 179 * standard fallback. 180 */ 181 *vpos = 0; 182 } 183 184 return ret; 185 } 186 187 static void vc4_crtc_destroy(struct drm_crtc *crtc) 188 { 189 drm_crtc_cleanup(crtc); 190 } 191 192 static void 193 vc4_crtc_lut_load(struct drm_crtc *crtc) 194 { 195 struct drm_device *dev = crtc->dev; 196 struct vc4_dev *vc4 = to_vc4_dev(dev); 197 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 198 u32 i; 199 200 /* The LUT memory is laid out with each HVS channel in order, 201 * each of which takes 256 writes for R, 256 for G, then 256 202 * for B. 203 */ 204 HVS_WRITE(SCALER_GAMADDR, 205 SCALER_GAMADDR_AUTOINC | 206 (vc4_crtc->channel * 3 * crtc->gamma_size)); 207 208 for (i = 0; i < crtc->gamma_size; i++) 209 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]); 210 for (i = 0; i < crtc->gamma_size; i++) 211 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]); 212 for (i = 0; i < crtc->gamma_size; i++) 213 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); 214 } 215 216 static void 217 vc4_crtc_update_gamma_lut(struct drm_crtc *crtc) 218 { 219 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 220 struct drm_color_lut *lut = crtc->state->gamma_lut->data; 221 u32 length = drm_color_lut_size(crtc->state->gamma_lut); 222 u32 i; 223 224 for (i = 0; i < length; i++) { 225 vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8); 226 vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8); 227 vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8); 228 } 229 230 vc4_crtc_lut_load(crtc); 231 } 232 233 static u32 vc4_get_fifo_full_level(u32 format) 234 { 235 static const u32 fifo_len_bytes = 64; 236 237 switch (format) { 238 case PV_CONTROL_FORMAT_DSIV_16: 239 case PV_CONTROL_FORMAT_DSIC_16: 240 return fifo_len_bytes - 2 * HVS_FIFO_LATENCY_PIX; 241 case PV_CONTROL_FORMAT_DSIV_18: 242 return fifo_len_bytes - 14; 243 case PV_CONTROL_FORMAT_24: 244 case PV_CONTROL_FORMAT_DSIV_24: 245 default: 246 return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; 247 } 248 } 249 250 /* 251 * Returns the encoder attached to the CRTC. 252 * 253 * VC4 can only scan out to one encoder at a time, while the DRM core 254 * allows drivers to push pixels to more than one encoder from the 255 * same CRTC. 256 */ 257 static struct drm_encoder *vc4_get_crtc_encoder(struct drm_crtc *crtc) 258 { 259 struct drm_connector *connector; 260 struct drm_connector_list_iter conn_iter; 261 262 drm_connector_list_iter_begin(crtc->dev, &conn_iter); 263 drm_for_each_connector_iter(connector, &conn_iter) { 264 if (connector->state->crtc == crtc) { 265 drm_connector_list_iter_end(&conn_iter); 266 return connector->encoder; 267 } 268 } 269 drm_connector_list_iter_end(&conn_iter); 270 271 return NULL; 272 } 273 274 static void vc4_crtc_config_pv(struct drm_crtc *crtc) 275 { 276 struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); 277 struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); 278 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 279 struct drm_crtc_state *state = crtc->state; 280 struct drm_display_mode *mode = &state->adjusted_mode; 281 bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; 282 u32 pixel_rep = (mode->flags & DRM_MODE_FLAG_DBLCLK) ? 2 : 1; 283 bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 || 284 vc4_encoder->type == VC4_ENCODER_TYPE_DSI1); 285 u32 format = is_dsi ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24; 286 287 /* Reset the PV fifo. */ 288 CRTC_WRITE(PV_CONTROL, 0); 289 CRTC_WRITE(PV_CONTROL, PV_CONTROL_FIFO_CLR | PV_CONTROL_EN); 290 CRTC_WRITE(PV_CONTROL, 0); 291 292 CRTC_WRITE(PV_HORZA, 293 VC4_SET_FIELD((mode->htotal - 294 mode->hsync_end) * pixel_rep, 295 PV_HORZA_HBP) | 296 VC4_SET_FIELD((mode->hsync_end - 297 mode->hsync_start) * pixel_rep, 298 PV_HORZA_HSYNC)); 299 CRTC_WRITE(PV_HORZB, 300 VC4_SET_FIELD((mode->hsync_start - 301 mode->hdisplay) * pixel_rep, 302 PV_HORZB_HFP) | 303 VC4_SET_FIELD(mode->hdisplay * pixel_rep, PV_HORZB_HACTIVE)); 304 305 CRTC_WRITE(PV_VERTA, 306 VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end, 307 PV_VERTA_VBP) | 308 VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start, 309 PV_VERTA_VSYNC)); 310 CRTC_WRITE(PV_VERTB, 311 VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay, 312 PV_VERTB_VFP) | 313 VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE)); 314 315 if (interlace) { 316 CRTC_WRITE(PV_VERTA_EVEN, 317 VC4_SET_FIELD(mode->crtc_vtotal - 318 mode->crtc_vsync_end - 1, 319 PV_VERTA_VBP) | 320 VC4_SET_FIELD(mode->crtc_vsync_end - 321 mode->crtc_vsync_start, 322 PV_VERTA_VSYNC)); 323 CRTC_WRITE(PV_VERTB_EVEN, 324 VC4_SET_FIELD(mode->crtc_vsync_start - 325 mode->crtc_vdisplay, 326 PV_VERTB_VFP) | 327 VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE)); 328 329 /* We set up first field even mode for HDMI. VEC's 330 * NTSC mode would want first field odd instead, once 331 * we support it (to do so, set ODD_FIRST and put the 332 * delay in VSYNCD_EVEN instead). 333 */ 334 CRTC_WRITE(PV_V_CONTROL, 335 PV_VCONTROL_CONTINUOUS | 336 (is_dsi ? PV_VCONTROL_DSI : 0) | 337 PV_VCONTROL_INTERLACE | 338 VC4_SET_FIELD(mode->htotal * pixel_rep / 2, 339 PV_VCONTROL_ODD_DELAY)); 340 CRTC_WRITE(PV_VSYNCD_EVEN, 0); 341 } else { 342 CRTC_WRITE(PV_V_CONTROL, 343 PV_VCONTROL_CONTINUOUS | 344 (is_dsi ? PV_VCONTROL_DSI : 0)); 345 } 346 347 if (is_dsi) 348 CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); 349 350 CRTC_WRITE(PV_CONTROL, 351 VC4_SET_FIELD(format, PV_CONTROL_FORMAT) | 352 VC4_SET_FIELD(vc4_get_fifo_full_level(format), 353 PV_CONTROL_FIFO_LEVEL) | 354 VC4_SET_FIELD(pixel_rep - 1, PV_CONTROL_PIXEL_REP) | 355 PV_CONTROL_CLR_AT_START | 356 PV_CONTROL_TRIGGER_UNDERFLOW | 357 PV_CONTROL_WAIT_HSTART | 358 VC4_SET_FIELD(vc4_encoder->clock_select, 359 PV_CONTROL_CLK_SELECT) | 360 PV_CONTROL_FIFO_CLR | 361 PV_CONTROL_EN); 362 } 363 364 static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc) 365 { 366 struct drm_device *dev = crtc->dev; 367 struct vc4_dev *vc4 = to_vc4_dev(dev); 368 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 369 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 370 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 371 bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; 372 bool debug_dump_regs = false; 373 374 if (debug_dump_regs) { 375 struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); 376 dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n", 377 drm_crtc_index(crtc)); 378 drm_print_regset32(&p, &vc4_crtc->regset); 379 } 380 381 if (vc4_crtc->channel == 2) { 382 u32 dispctrl; 383 u32 dsp3_mux; 384 385 /* 386 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to 387 * FIFO X'. 388 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. 389 * 390 * DSP3 is connected to FIFO2 unless the transposer is 391 * enabled. In this case, FIFO 2 is directly accessed by the 392 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 393 * route. 394 */ 395 if (vc4_state->feed_txp) 396 dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); 397 else 398 dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); 399 400 dispctrl = HVS_READ(SCALER_DISPCTRL) & 401 ~SCALER_DISPCTRL_DSP3_MUX_MASK; 402 HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); 403 } 404 405 if (!vc4_state->feed_txp) 406 vc4_crtc_config_pv(crtc); 407 408 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), 409 SCALER_DISPBKGND_AUTOHS | 410 SCALER_DISPBKGND_GAMMA | 411 (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); 412 413 /* Reload the LUT, since the SRAMs would have been disabled if 414 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. 415 */ 416 vc4_crtc_lut_load(crtc); 417 418 if (debug_dump_regs) { 419 struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev); 420 dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n", 421 drm_crtc_index(crtc)); 422 drm_print_regset32(&p, &vc4_crtc->regset); 423 } 424 } 425 426 static void require_hvs_enabled(struct drm_device *dev) 427 { 428 struct vc4_dev *vc4 = to_vc4_dev(dev); 429 430 WARN_ON_ONCE((HVS_READ(SCALER_DISPCTRL) & SCALER_DISPCTRL_ENABLE) != 431 SCALER_DISPCTRL_ENABLE); 432 } 433 434 static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, 435 struct drm_crtc_state *old_state) 436 { 437 struct drm_device *dev = crtc->dev; 438 struct vc4_dev *vc4 = to_vc4_dev(dev); 439 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 440 u32 chan = vc4_crtc->channel; 441 int ret; 442 require_hvs_enabled(dev); 443 444 /* Disable vblank irq handling before crtc is disabled. */ 445 drm_crtc_vblank_off(crtc); 446 447 CRTC_WRITE(PV_V_CONTROL, 448 CRTC_READ(PV_V_CONTROL) & ~PV_VCONTROL_VIDEN); 449 ret = wait_for(!(CRTC_READ(PV_V_CONTROL) & PV_VCONTROL_VIDEN), 1); 450 WARN_ONCE(ret, "Timeout waiting for !PV_VCONTROL_VIDEN\n"); 451 452 if (HVS_READ(SCALER_DISPCTRLX(chan)) & 453 SCALER_DISPCTRLX_ENABLE) { 454 HVS_WRITE(SCALER_DISPCTRLX(chan), 455 SCALER_DISPCTRLX_RESET); 456 457 /* While the docs say that reset is self-clearing, it 458 * seems it doesn't actually. 459 */ 460 HVS_WRITE(SCALER_DISPCTRLX(chan), 0); 461 } 462 463 /* Once we leave, the scaler should be disabled and its fifo empty. */ 464 465 WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); 466 467 WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), 468 SCALER_DISPSTATX_MODE) != 469 SCALER_DISPSTATX_MODE_DISABLED); 470 471 WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & 472 (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != 473 SCALER_DISPSTATX_EMPTY); 474 475 /* 476 * Make sure we issue a vblank event after disabling the CRTC if 477 * someone was waiting it. 478 */ 479 if (crtc->state->event) { 480 unsigned long flags; 481 482 spin_lock_irqsave(&dev->event_lock, flags); 483 drm_crtc_send_vblank_event(crtc, crtc->state->event); 484 crtc->state->event = NULL; 485 spin_unlock_irqrestore(&dev->event_lock, flags); 486 } 487 } 488 489 void vc4_crtc_txp_armed(struct drm_crtc_state *state) 490 { 491 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 492 493 vc4_state->txp_armed = true; 494 } 495 496 static void vc4_crtc_update_dlist(struct drm_crtc *crtc) 497 { 498 struct drm_device *dev = crtc->dev; 499 struct vc4_dev *vc4 = to_vc4_dev(dev); 500 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 501 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 502 503 if (crtc->state->event) { 504 unsigned long flags; 505 506 crtc->state->event->pipe = drm_crtc_index(crtc); 507 508 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 509 510 spin_lock_irqsave(&dev->event_lock, flags); 511 512 if (!vc4_state->feed_txp || vc4_state->txp_armed) { 513 vc4_crtc->event = crtc->state->event; 514 crtc->state->event = NULL; 515 } 516 517 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 518 vc4_state->mm.start); 519 520 spin_unlock_irqrestore(&dev->event_lock, flags); 521 } else { 522 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 523 vc4_state->mm.start); 524 } 525 } 526 527 static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, 528 struct drm_crtc_state *old_state) 529 { 530 struct drm_device *dev = crtc->dev; 531 struct vc4_dev *vc4 = to_vc4_dev(dev); 532 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 533 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 534 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 535 536 require_hvs_enabled(dev); 537 538 /* Enable vblank irq handling before crtc is started otherwise 539 * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist(). 540 */ 541 drm_crtc_vblank_on(crtc); 542 vc4_crtc_update_dlist(crtc); 543 544 /* Turn on the scaler, which will wait for vstart to start 545 * compositing. 546 * When feeding the transposer, we should operate in oneshot 547 * mode. 548 */ 549 HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel), 550 VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | 551 VC4_SET_FIELD(mode->vdisplay, SCALER_DISPCTRLX_HEIGHT) | 552 SCALER_DISPCTRLX_ENABLE | 553 (vc4_state->feed_txp ? SCALER_DISPCTRLX_ONESHOT : 0)); 554 555 /* When feeding the transposer block the pixelvalve is unneeded and 556 * should not be enabled. 557 */ 558 if (!vc4_state->feed_txp) 559 CRTC_WRITE(PV_V_CONTROL, 560 CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); 561 } 562 563 static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc, 564 const struct drm_display_mode *mode) 565 { 566 /* Do not allow doublescan modes from user space */ 567 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) { 568 DRM_DEBUG_KMS("[CRTC:%d] Doublescan mode rejected.\n", 569 crtc->base.id); 570 return MODE_NO_DBLESCAN; 571 } 572 573 return MODE_OK; 574 } 575 576 void vc4_crtc_get_margins(struct drm_crtc_state *state, 577 unsigned int *left, unsigned int *right, 578 unsigned int *top, unsigned int *bottom) 579 { 580 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 581 struct drm_connector_state *conn_state; 582 struct drm_connector *conn; 583 int i; 584 585 *left = vc4_state->margins.left; 586 *right = vc4_state->margins.right; 587 *top = vc4_state->margins.top; 588 *bottom = vc4_state->margins.bottom; 589 590 /* We have to interate over all new connector states because 591 * vc4_crtc_get_margins() might be called before 592 * vc4_crtc_atomic_check() which means margins info in vc4_crtc_state 593 * might be outdated. 594 */ 595 for_each_new_connector_in_state(state->state, conn, conn_state, i) { 596 if (conn_state->crtc != state->crtc) 597 continue; 598 599 *left = conn_state->tv.margins.left; 600 *right = conn_state->tv.margins.right; 601 *top = conn_state->tv.margins.top; 602 *bottom = conn_state->tv.margins.bottom; 603 break; 604 } 605 } 606 607 static int vc4_crtc_atomic_check(struct drm_crtc *crtc, 608 struct drm_crtc_state *state) 609 { 610 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 611 struct drm_device *dev = crtc->dev; 612 struct vc4_dev *vc4 = to_vc4_dev(dev); 613 struct drm_plane *plane; 614 unsigned long flags; 615 const struct drm_plane_state *plane_state; 616 struct drm_connector *conn; 617 struct drm_connector_state *conn_state; 618 u32 dlist_count = 0; 619 int ret, i; 620 621 /* The pixelvalve can only feed one encoder (and encoders are 622 * 1:1 with connectors.) 623 */ 624 if (hweight32(state->connector_mask) > 1) 625 return -EINVAL; 626 627 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state) 628 dlist_count += vc4_plane_dlist_size(plane_state); 629 630 dlist_count++; /* Account for SCALER_CTL0_END. */ 631 632 spin_lock_irqsave(&vc4->hvs->mm_lock, flags); 633 ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm, 634 dlist_count); 635 spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); 636 if (ret) 637 return ret; 638 639 for_each_new_connector_in_state(state->state, conn, conn_state, i) { 640 if (conn_state->crtc != crtc) 641 continue; 642 643 /* The writeback connector is implemented using the transposer 644 * block which is directly taking its data from the HVS FIFO. 645 */ 646 if (conn->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) { 647 state->no_vblank = true; 648 vc4_state->feed_txp = true; 649 } else { 650 state->no_vblank = false; 651 vc4_state->feed_txp = false; 652 } 653 654 vc4_state->margins.left = conn_state->tv.margins.left; 655 vc4_state->margins.right = conn_state->tv.margins.right; 656 vc4_state->margins.top = conn_state->tv.margins.top; 657 vc4_state->margins.bottom = conn_state->tv.margins.bottom; 658 break; 659 } 660 661 return 0; 662 } 663 664 static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, 665 struct drm_crtc_state *old_state) 666 { 667 struct drm_device *dev = crtc->dev; 668 struct vc4_dev *vc4 = to_vc4_dev(dev); 669 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 670 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 671 struct drm_plane *plane; 672 struct vc4_plane_state *vc4_plane_state; 673 bool debug_dump_regs = false; 674 bool enable_bg_fill = false; 675 u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start; 676 u32 __iomem *dlist_next = dlist_start; 677 678 if (debug_dump_regs) { 679 DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc)); 680 vc4_hvs_dump_state(dev); 681 } 682 683 /* Copy all the active planes' dlist contents to the hardware dlist. */ 684 drm_atomic_crtc_for_each_plane(plane, crtc) { 685 /* Is this the first active plane? */ 686 if (dlist_next == dlist_start) { 687 /* We need to enable background fill when a plane 688 * could be alpha blending from the background, i.e. 689 * where no other plane is underneath. It suffices to 690 * consider the first active plane here since we set 691 * needs_bg_fill such that either the first plane 692 * already needs it or all planes on top blend from 693 * the first or a lower plane. 694 */ 695 vc4_plane_state = to_vc4_plane_state(plane->state); 696 enable_bg_fill = vc4_plane_state->needs_bg_fill; 697 } 698 699 dlist_next += vc4_plane_write_dlist(plane, dlist_next); 700 } 701 702 writel(SCALER_CTL0_END, dlist_next); 703 dlist_next++; 704 705 WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); 706 707 if (enable_bg_fill) 708 /* This sets a black background color fill, as is the case 709 * with other DRM drivers. 710 */ 711 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), 712 HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) | 713 SCALER_DISPBKGND_FILL); 714 715 /* Only update DISPLIST if the CRTC was already running and is not 716 * being disabled. 717 * vc4_crtc_enable() takes care of updating the dlist just after 718 * re-enabling VBLANK interrupts and before enabling the engine. 719 * If the CRTC is being disabled, there's no point in updating this 720 * information. 721 */ 722 if (crtc->state->active && old_state->active) 723 vc4_crtc_update_dlist(crtc); 724 725 if (crtc->state->color_mgmt_changed) { 726 u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)); 727 728 if (crtc->state->gamma_lut) { 729 vc4_crtc_update_gamma_lut(crtc); 730 dispbkgndx |= SCALER_DISPBKGND_GAMMA; 731 } else { 732 /* Unsetting DISPBKGND_GAMMA skips the gamma lut step 733 * in hardware, which is the same as a linear lut that 734 * DRM expects us to use in absence of a user lut. 735 */ 736 dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; 737 } 738 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx); 739 } 740 741 if (debug_dump_regs) { 742 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); 743 vc4_hvs_dump_state(dev); 744 } 745 } 746 747 static int vc4_enable_vblank(struct drm_crtc *crtc) 748 { 749 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 750 751 CRTC_WRITE(PV_INTEN, PV_INT_VFP_START); 752 753 return 0; 754 } 755 756 static void vc4_disable_vblank(struct drm_crtc *crtc) 757 { 758 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 759 760 CRTC_WRITE(PV_INTEN, 0); 761 } 762 763 static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) 764 { 765 struct drm_crtc *crtc = &vc4_crtc->base; 766 struct drm_device *dev = crtc->dev; 767 struct vc4_dev *vc4 = to_vc4_dev(dev); 768 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 769 u32 chan = vc4_crtc->channel; 770 unsigned long flags; 771 772 spin_lock_irqsave(&dev->event_lock, flags); 773 if (vc4_crtc->event && 774 (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)) || 775 vc4_state->feed_txp)) { 776 drm_crtc_send_vblank_event(crtc, vc4_crtc->event); 777 vc4_crtc->event = NULL; 778 drm_crtc_vblank_put(crtc); 779 780 /* Wait for the page flip to unmask the underrun to ensure that 781 * the display list was updated by the hardware. Before that 782 * happens, the HVS will be using the previous display list with 783 * the CRTC and encoder already reconfigured, leading to 784 * underruns. This can be seen when reconfiguring the CRTC. 785 */ 786 vc4_hvs_unmask_underrun(dev, vc4_crtc->channel); 787 } 788 spin_unlock_irqrestore(&dev->event_lock, flags); 789 } 790 791 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc) 792 { 793 crtc->t_vblank = ktime_get(); 794 drm_crtc_handle_vblank(&crtc->base); 795 vc4_crtc_handle_page_flip(crtc); 796 } 797 798 static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) 799 { 800 struct vc4_crtc *vc4_crtc = data; 801 u32 stat = CRTC_READ(PV_INTSTAT); 802 irqreturn_t ret = IRQ_NONE; 803 804 if (stat & PV_INT_VFP_START) { 805 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); 806 vc4_crtc_handle_vblank(vc4_crtc); 807 ret = IRQ_HANDLED; 808 } 809 810 return ret; 811 } 812 813 struct vc4_async_flip_state { 814 struct drm_crtc *crtc; 815 struct drm_framebuffer *fb; 816 struct drm_framebuffer *old_fb; 817 struct drm_pending_vblank_event *event; 818 819 struct vc4_seqno_cb cb; 820 }; 821 822 /* Called when the V3D execution for the BO being flipped to is done, so that 823 * we can actually update the plane's address to point to it. 824 */ 825 static void 826 vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) 827 { 828 struct vc4_async_flip_state *flip_state = 829 container_of(cb, struct vc4_async_flip_state, cb); 830 struct drm_crtc *crtc = flip_state->crtc; 831 struct drm_device *dev = crtc->dev; 832 struct vc4_dev *vc4 = to_vc4_dev(dev); 833 struct drm_plane *plane = crtc->primary; 834 835 vc4_plane_async_set_fb(plane, flip_state->fb); 836 if (flip_state->event) { 837 unsigned long flags; 838 839 spin_lock_irqsave(&dev->event_lock, flags); 840 drm_crtc_send_vblank_event(crtc, flip_state->event); 841 spin_unlock_irqrestore(&dev->event_lock, flags); 842 } 843 844 drm_crtc_vblank_put(crtc); 845 drm_framebuffer_put(flip_state->fb); 846 847 /* Decrement the BO usecnt in order to keep the inc/dec calls balanced 848 * when the planes are updated through the async update path. 849 * FIXME: we should move to generic async-page-flip when it's 850 * available, so that we can get rid of this hand-made cleanup_fb() 851 * logic. 852 */ 853 if (flip_state->old_fb) { 854 struct drm_gem_cma_object *cma_bo; 855 struct vc4_bo *bo; 856 857 cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); 858 bo = to_vc4_bo(&cma_bo->base); 859 vc4_bo_dec_usecnt(bo); 860 drm_framebuffer_put(flip_state->old_fb); 861 } 862 863 kfree(flip_state); 864 865 up(&vc4->async_modeset); 866 } 867 868 /* Implements async (non-vblank-synced) page flips. 869 * 870 * The page flip ioctl needs to return immediately, so we grab the 871 * modeset semaphore on the pipe, and queue the address update for 872 * when V3D is done with the BO being flipped to. 873 */ 874 static int vc4_async_page_flip(struct drm_crtc *crtc, 875 struct drm_framebuffer *fb, 876 struct drm_pending_vblank_event *event, 877 uint32_t flags) 878 { 879 struct drm_device *dev = crtc->dev; 880 struct vc4_dev *vc4 = to_vc4_dev(dev); 881 struct drm_plane *plane = crtc->primary; 882 int ret = 0; 883 struct vc4_async_flip_state *flip_state; 884 struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); 885 struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); 886 887 /* Increment the BO usecnt here, so that we never end up with an 888 * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the 889 * plane is later updated through the non-async path. 890 * FIXME: we should move to generic async-page-flip when it's 891 * available, so that we can get rid of this hand-made prepare_fb() 892 * logic. 893 */ 894 ret = vc4_bo_inc_usecnt(bo); 895 if (ret) 896 return ret; 897 898 flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); 899 if (!flip_state) { 900 vc4_bo_dec_usecnt(bo); 901 return -ENOMEM; 902 } 903 904 drm_framebuffer_get(fb); 905 flip_state->fb = fb; 906 flip_state->crtc = crtc; 907 flip_state->event = event; 908 909 /* Make sure all other async modesetes have landed. */ 910 ret = down_interruptible(&vc4->async_modeset); 911 if (ret) { 912 drm_framebuffer_put(fb); 913 vc4_bo_dec_usecnt(bo); 914 kfree(flip_state); 915 return ret; 916 } 917 918 /* Save the current FB before it's replaced by the new one in 919 * drm_atomic_set_fb_for_plane(). We'll need the old FB in 920 * vc4_async_page_flip_complete() to decrement the BO usecnt and keep 921 * it consistent. 922 * FIXME: we should move to generic async-page-flip when it's 923 * available, so that we can get rid of this hand-made cleanup_fb() 924 * logic. 925 */ 926 flip_state->old_fb = plane->state->fb; 927 if (flip_state->old_fb) 928 drm_framebuffer_get(flip_state->old_fb); 929 930 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 931 932 /* Immediately update the plane's legacy fb pointer, so that later 933 * modeset prep sees the state that will be present when the semaphore 934 * is released. 935 */ 936 drm_atomic_set_fb_for_plane(plane->state, fb); 937 938 vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno, 939 vc4_async_page_flip_complete); 940 941 /* Driver takes ownership of state on successful async commit. */ 942 return 0; 943 } 944 945 static int vc4_page_flip(struct drm_crtc *crtc, 946 struct drm_framebuffer *fb, 947 struct drm_pending_vblank_event *event, 948 uint32_t flags, 949 struct drm_modeset_acquire_ctx *ctx) 950 { 951 if (flags & DRM_MODE_PAGE_FLIP_ASYNC) 952 return vc4_async_page_flip(crtc, fb, event, flags); 953 else 954 return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx); 955 } 956 957 static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) 958 { 959 struct vc4_crtc_state *vc4_state, *old_vc4_state; 960 961 vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL); 962 if (!vc4_state) 963 return NULL; 964 965 old_vc4_state = to_vc4_crtc_state(crtc->state); 966 vc4_state->feed_txp = old_vc4_state->feed_txp; 967 vc4_state->margins = old_vc4_state->margins; 968 969 __drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base); 970 return &vc4_state->base; 971 } 972 973 static void vc4_crtc_destroy_state(struct drm_crtc *crtc, 974 struct drm_crtc_state *state) 975 { 976 struct vc4_dev *vc4 = to_vc4_dev(crtc->dev); 977 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 978 979 if (drm_mm_node_allocated(&vc4_state->mm)) { 980 unsigned long flags; 981 982 spin_lock_irqsave(&vc4->hvs->mm_lock, flags); 983 drm_mm_remove_node(&vc4_state->mm); 984 spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); 985 986 } 987 988 drm_atomic_helper_crtc_destroy_state(crtc, state); 989 } 990 991 static void 992 vc4_crtc_reset(struct drm_crtc *crtc) 993 { 994 if (crtc->state) 995 vc4_crtc_destroy_state(crtc, crtc->state); 996 997 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); 998 if (crtc->state) 999 crtc->state->crtc = crtc; 1000 } 1001 1002 static const struct drm_crtc_funcs vc4_crtc_funcs = { 1003 .set_config = drm_atomic_helper_set_config, 1004 .destroy = vc4_crtc_destroy, 1005 .page_flip = vc4_page_flip, 1006 .set_property = NULL, 1007 .cursor_set = NULL, /* handled by drm_mode_cursor_universal */ 1008 .cursor_move = NULL, /* handled by drm_mode_cursor_universal */ 1009 .reset = vc4_crtc_reset, 1010 .atomic_duplicate_state = vc4_crtc_duplicate_state, 1011 .atomic_destroy_state = vc4_crtc_destroy_state, 1012 .gamma_set = drm_atomic_helper_legacy_gamma_set, 1013 .enable_vblank = vc4_enable_vblank, 1014 .disable_vblank = vc4_disable_vblank, 1015 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 1016 }; 1017 1018 static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = { 1019 .mode_set_nofb = vc4_crtc_mode_set_nofb, 1020 .mode_valid = vc4_crtc_mode_valid, 1021 .atomic_check = vc4_crtc_atomic_check, 1022 .atomic_flush = vc4_crtc_atomic_flush, 1023 .atomic_enable = vc4_crtc_atomic_enable, 1024 .atomic_disable = vc4_crtc_atomic_disable, 1025 .get_scanout_position = vc4_crtc_get_scanout_position, 1026 }; 1027 1028 static const struct vc4_crtc_data bcm2835_pv0_data = { 1029 .hvs_channel = 0, 1030 .debugfs_name = "crtc0_regs", 1031 .encoder_types = { 1032 [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0, 1033 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI, 1034 }, 1035 }; 1036 1037 static const struct vc4_crtc_data bcm2835_pv1_data = { 1038 .hvs_channel = 2, 1039 .debugfs_name = "crtc1_regs", 1040 .encoder_types = { 1041 [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1, 1042 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI, 1043 }, 1044 }; 1045 1046 static const struct vc4_crtc_data bcm2835_pv2_data = { 1047 .hvs_channel = 1, 1048 .debugfs_name = "crtc2_regs", 1049 .encoder_types = { 1050 [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI, 1051 [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC, 1052 }, 1053 }; 1054 1055 static const struct of_device_id vc4_crtc_dt_match[] = { 1056 { .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data }, 1057 { .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data }, 1058 { .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data }, 1059 {} 1060 }; 1061 1062 static void vc4_set_crtc_possible_masks(struct drm_device *drm, 1063 struct drm_crtc *crtc) 1064 { 1065 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 1066 const struct vc4_crtc_data *crtc_data = vc4_crtc->data; 1067 const enum vc4_encoder_type *encoder_types = crtc_data->encoder_types; 1068 struct drm_encoder *encoder; 1069 1070 drm_for_each_encoder(encoder, drm) { 1071 struct vc4_encoder *vc4_encoder; 1072 int i; 1073 1074 /* HVS FIFO2 can feed the TXP IP. */ 1075 if (crtc_data->hvs_channel == 2 && 1076 encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { 1077 encoder->possible_crtcs |= drm_crtc_mask(crtc); 1078 continue; 1079 } 1080 1081 vc4_encoder = to_vc4_encoder(encoder); 1082 for (i = 0; i < ARRAY_SIZE(crtc_data->encoder_types); i++) { 1083 if (vc4_encoder->type == encoder_types[i]) { 1084 vc4_encoder->clock_select = i; 1085 encoder->possible_crtcs |= drm_crtc_mask(crtc); 1086 break; 1087 } 1088 } 1089 } 1090 } 1091 1092 static void 1093 vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc) 1094 { 1095 struct drm_device *drm = vc4_crtc->base.dev; 1096 struct vc4_dev *vc4 = to_vc4_dev(drm); 1097 u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel)); 1098 /* Top/base are supposed to be 4-pixel aligned, but the 1099 * Raspberry Pi firmware fills the low bits (which are 1100 * presumably ignored). 1101 */ 1102 u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3; 1103 u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; 1104 1105 vc4_crtc->cob_size = top - base + 4; 1106 } 1107 1108 static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) 1109 { 1110 struct platform_device *pdev = to_platform_device(dev); 1111 struct drm_device *drm = dev_get_drvdata(master); 1112 const struct vc4_crtc_data *pv_data; 1113 struct vc4_crtc *vc4_crtc; 1114 struct drm_crtc *crtc; 1115 struct drm_plane *primary_plane, *destroy_plane, *temp; 1116 int ret, i; 1117 1118 vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL); 1119 if (!vc4_crtc) 1120 return -ENOMEM; 1121 crtc = &vc4_crtc->base; 1122 1123 pv_data = of_device_get_match_data(dev); 1124 if (!pv_data) 1125 return -ENODEV; 1126 vc4_crtc->data = pv_data; 1127 vc4_crtc->pdev = pdev; 1128 1129 vc4_crtc->regs = vc4_ioremap_regs(pdev, 0); 1130 if (IS_ERR(vc4_crtc->regs)) 1131 return PTR_ERR(vc4_crtc->regs); 1132 1133 vc4_crtc->regset.base = vc4_crtc->regs; 1134 vc4_crtc->regset.regs = crtc_regs; 1135 vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs); 1136 1137 /* For now, we create just the primary and the legacy cursor 1138 * planes. We should be able to stack more planes on easily, 1139 * but to do that we would need to compute the bandwidth 1140 * requirement of the plane configuration, and reject ones 1141 * that will take too much. 1142 */ 1143 primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); 1144 if (IS_ERR(primary_plane)) { 1145 dev_err(dev, "failed to construct primary plane\n"); 1146 ret = PTR_ERR(primary_plane); 1147 goto err; 1148 } 1149 1150 drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, 1151 &vc4_crtc_funcs, NULL); 1152 drm_crtc_helper_add(crtc, &vc4_crtc_helper_funcs); 1153 vc4_crtc->channel = vc4_crtc->data->hvs_channel; 1154 drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); 1155 drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); 1156 1157 /* We support CTM, but only for one CRTC at a time. It's therefore 1158 * implemented as private driver state in vc4_kms, not here. 1159 */ 1160 drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size); 1161 1162 vc4_crtc_get_cob_allocation(vc4_crtc); 1163 1164 CRTC_WRITE(PV_INTEN, 0); 1165 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); 1166 ret = devm_request_irq(dev, platform_get_irq(pdev, 0), 1167 vc4_crtc_irq_handler, 0, "vc4 crtc", vc4_crtc); 1168 if (ret) 1169 goto err_destroy_planes; 1170 1171 vc4_set_crtc_possible_masks(drm, crtc); 1172 1173 for (i = 0; i < crtc->gamma_size; i++) { 1174 vc4_crtc->lut_r[i] = i; 1175 vc4_crtc->lut_g[i] = i; 1176 vc4_crtc->lut_b[i] = i; 1177 } 1178 1179 platform_set_drvdata(pdev, vc4_crtc); 1180 1181 vc4_debugfs_add_regset32(drm, pv_data->debugfs_name, 1182 &vc4_crtc->regset); 1183 1184 return 0; 1185 1186 err_destroy_planes: 1187 list_for_each_entry_safe(destroy_plane, temp, 1188 &drm->mode_config.plane_list, head) { 1189 if (destroy_plane->possible_crtcs == drm_crtc_mask(crtc)) 1190 destroy_plane->funcs->destroy(destroy_plane); 1191 } 1192 err: 1193 return ret; 1194 } 1195 1196 static void vc4_crtc_unbind(struct device *dev, struct device *master, 1197 void *data) 1198 { 1199 struct platform_device *pdev = to_platform_device(dev); 1200 struct vc4_crtc *vc4_crtc = dev_get_drvdata(dev); 1201 1202 vc4_crtc_destroy(&vc4_crtc->base); 1203 1204 CRTC_WRITE(PV_INTEN, 0); 1205 1206 platform_set_drvdata(pdev, NULL); 1207 } 1208 1209 static const struct component_ops vc4_crtc_ops = { 1210 .bind = vc4_crtc_bind, 1211 .unbind = vc4_crtc_unbind, 1212 }; 1213 1214 static int vc4_crtc_dev_probe(struct platform_device *pdev) 1215 { 1216 return component_add(&pdev->dev, &vc4_crtc_ops); 1217 } 1218 1219 static int vc4_crtc_dev_remove(struct platform_device *pdev) 1220 { 1221 component_del(&pdev->dev, &vc4_crtc_ops); 1222 return 0; 1223 } 1224 1225 struct platform_driver vc4_crtc_driver = { 1226 .probe = vc4_crtc_dev_probe, 1227 .remove = vc4_crtc_dev_remove, 1228 .driver = { 1229 .name = "vc4_crtc", 1230 .of_match_table = vc4_crtc_dt_match, 1231 }, 1232 }; 1233