1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2015 Broadcom 4 */ 5 6 /** 7 * DOC: VC4 HVS module. 8 * 9 * The Hardware Video Scaler (HVS) is the piece of hardware that does 10 * translation, scaling, colorspace conversion, and compositing of 11 * pixels stored in framebuffers into a FIFO of pixels going out to 12 * the Pixel Valve (CRTC). It operates at the system clock rate (the 13 * system audio clock gate, specifically), which is much higher than 14 * the pixel clock rate. 15 * 16 * There is a single global HVS, with multiple output FIFOs that can 17 * be consumed by the PVs. This file just manages the resources for 18 * the HVS, while the vc4_crtc.c code actually drives HVS setup for 19 * each CRTC. 20 */ 21 22 #include <linux/component.h> 23 #include <linux/platform_device.h> 24 25 #include <drm/drm_atomic_helper.h> 26 #include <drm/drm_vblank.h> 27 28 #include "vc4_drv.h" 29 #include "vc4_regs.h" 30 31 static const struct debugfs_reg32 hvs_regs[] = { 32 VC4_REG32(SCALER_DISPCTRL), 33 VC4_REG32(SCALER_DISPSTAT), 34 VC4_REG32(SCALER_DISPID), 35 VC4_REG32(SCALER_DISPECTRL), 36 VC4_REG32(SCALER_DISPPROF), 37 VC4_REG32(SCALER_DISPDITHER), 38 VC4_REG32(SCALER_DISPEOLN), 39 VC4_REG32(SCALER_DISPLIST0), 40 VC4_REG32(SCALER_DISPLIST1), 41 VC4_REG32(SCALER_DISPLIST2), 42 VC4_REG32(SCALER_DISPLSTAT), 43 VC4_REG32(SCALER_DISPLACT0), 44 VC4_REG32(SCALER_DISPLACT1), 45 VC4_REG32(SCALER_DISPLACT2), 46 VC4_REG32(SCALER_DISPCTRL0), 47 VC4_REG32(SCALER_DISPBKGND0), 48 VC4_REG32(SCALER_DISPSTAT0), 49 VC4_REG32(SCALER_DISPBASE0), 50 VC4_REG32(SCALER_DISPCTRL1), 51 VC4_REG32(SCALER_DISPBKGND1), 52 VC4_REG32(SCALER_DISPSTAT1), 53 VC4_REG32(SCALER_DISPBASE1), 54 VC4_REG32(SCALER_DISPCTRL2), 55 VC4_REG32(SCALER_DISPBKGND2), 56 VC4_REG32(SCALER_DISPSTAT2), 57 VC4_REG32(SCALER_DISPBASE2), 58 VC4_REG32(SCALER_DISPALPHA2), 59 VC4_REG32(SCALER_OLEDOFFS), 60 VC4_REG32(SCALER_OLEDCOEF0), 61 VC4_REG32(SCALER_OLEDCOEF1), 62 VC4_REG32(SCALER_OLEDCOEF2), 63 }; 64 65 void vc4_hvs_dump_state(struct drm_device *dev) 66 { 67 struct vc4_dev *vc4 = to_vc4_dev(dev); 68 struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev); 69 int i; 70 71 drm_print_regset32(&p, &vc4->hvs->regset); 72 73 DRM_INFO("HVS ctx:\n"); 74 for (i = 0; i < 64; i += 4) { 75 DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", 76 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", 77 readl((u32 __iomem *)vc4->hvs->dlist + i + 0), 78 readl((u32 __iomem *)vc4->hvs->dlist + i + 1), 79 readl((u32 __iomem *)vc4->hvs->dlist + i + 2), 80 readl((u32 __iomem *)vc4->hvs->dlist + i + 3)); 81 } 82 } 83 84 static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data) 85 { 86 struct drm_info_node *node = m->private; 87 struct drm_device *dev = node->minor->dev; 88 struct vc4_dev *vc4 = to_vc4_dev(dev); 89 struct drm_printer p = drm_seq_file_printer(m); 90 91 drm_printf(&p, "%d\n", atomic_read(&vc4->underrun)); 92 93 return 0; 94 } 95 96 /* The filter kernel is composed of dwords each containing 3 9-bit 97 * signed integers packed next to each other. 98 */ 99 #define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff) 100 #define VC4_PPF_FILTER_WORD(c0, c1, c2) \ 101 ((((c0) & 0x1ff) << 0) | \ 102 (((c1) & 0x1ff) << 9) | \ 103 (((c2) & 0x1ff) << 18)) 104 105 /* The whole filter kernel is arranged as the coefficients 0-16 going 106 * up, then a pad, then 17-31 going down and reversed within the 107 * dwords. This means that a linear phase kernel (where it's 108 * symmetrical at the boundary between 15 and 16) has the last 5 109 * dwords matching the first 5, but reversed. 110 */ 111 #define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8, \ 112 c9, c10, c11, c12, c13, c14, c15) \ 113 {VC4_PPF_FILTER_WORD(c0, c1, c2), \ 114 VC4_PPF_FILTER_WORD(c3, c4, c5), \ 115 VC4_PPF_FILTER_WORD(c6, c7, c8), \ 116 VC4_PPF_FILTER_WORD(c9, c10, c11), \ 117 VC4_PPF_FILTER_WORD(c12, c13, c14), \ 118 VC4_PPF_FILTER_WORD(c15, c15, 0)} 119 120 #define VC4_LINEAR_PHASE_KERNEL_DWORDS 6 121 #define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1) 122 123 /* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali. 124 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf 125 */ 126 static const u32 mitchell_netravali_1_3_1_3_kernel[] = 127 VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18, 128 50, 82, 119, 155, 187, 213, 227); 129 130 static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs, 131 struct drm_mm_node *space, 132 const u32 *kernel) 133 { 134 int ret, i; 135 u32 __iomem *dst_kernel; 136 137 ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS); 138 if (ret) { 139 DRM_ERROR("Failed to allocate space for filter kernel: %d\n", 140 ret); 141 return ret; 142 } 143 144 dst_kernel = hvs->dlist + space->start; 145 146 for (i = 0; i < VC4_KERNEL_DWORDS; i++) { 147 if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS) 148 writel(kernel[i], &dst_kernel[i]); 149 else { 150 writel(kernel[VC4_KERNEL_DWORDS - i - 1], 151 &dst_kernel[i]); 152 } 153 } 154 155 return 0; 156 } 157 158 static void vc4_hvs_lut_load(struct drm_crtc *crtc) 159 { 160 struct drm_device *dev = crtc->dev; 161 struct vc4_dev *vc4 = to_vc4_dev(dev); 162 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 163 u32 i; 164 165 /* The LUT memory is laid out with each HVS channel in order, 166 * each of which takes 256 writes for R, 256 for G, then 256 167 * for B. 168 */ 169 HVS_WRITE(SCALER_GAMADDR, 170 SCALER_GAMADDR_AUTOINC | 171 (vc4_crtc->channel * 3 * crtc->gamma_size)); 172 173 for (i = 0; i < crtc->gamma_size; i++) 174 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]); 175 for (i = 0; i < crtc->gamma_size; i++) 176 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]); 177 for (i = 0; i < crtc->gamma_size; i++) 178 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); 179 } 180 181 static void vc4_hvs_update_gamma_lut(struct drm_crtc *crtc) 182 { 183 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 184 struct drm_color_lut *lut = crtc->state->gamma_lut->data; 185 u32 length = drm_color_lut_size(crtc->state->gamma_lut); 186 u32 i; 187 188 for (i = 0; i < length; i++) { 189 vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8); 190 vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8); 191 vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8); 192 } 193 194 vc4_hvs_lut_load(crtc); 195 } 196 197 int vc4_hvs_atomic_check(struct drm_crtc *crtc, 198 struct drm_crtc_state *state) 199 { 200 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); 201 struct drm_device *dev = crtc->dev; 202 struct vc4_dev *vc4 = to_vc4_dev(dev); 203 struct drm_plane *plane; 204 unsigned long flags; 205 const struct drm_plane_state *plane_state; 206 u32 dlist_count = 0; 207 int ret; 208 209 /* The pixelvalve can only feed one encoder (and encoders are 210 * 1:1 with connectors.) 211 */ 212 if (hweight32(state->connector_mask) > 1) 213 return -EINVAL; 214 215 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state) 216 dlist_count += vc4_plane_dlist_size(plane_state); 217 218 dlist_count++; /* Account for SCALER_CTL0_END. */ 219 220 spin_lock_irqsave(&vc4->hvs->mm_lock, flags); 221 ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm, 222 dlist_count); 223 spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); 224 if (ret) 225 return ret; 226 227 return 0; 228 } 229 230 static void vc4_hvs_update_dlist(struct drm_crtc *crtc) 231 { 232 struct drm_device *dev = crtc->dev; 233 struct vc4_dev *vc4 = to_vc4_dev(dev); 234 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 235 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 236 237 if (crtc->state->event) { 238 unsigned long flags; 239 240 crtc->state->event->pipe = drm_crtc_index(crtc); 241 242 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 243 244 spin_lock_irqsave(&dev->event_lock, flags); 245 246 if (!vc4_state->feed_txp || vc4_state->txp_armed) { 247 vc4_crtc->event = crtc->state->event; 248 crtc->state->event = NULL; 249 } 250 251 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 252 vc4_state->mm.start); 253 254 spin_unlock_irqrestore(&dev->event_lock, flags); 255 } else { 256 HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 257 vc4_state->mm.start); 258 } 259 } 260 261 void vc4_hvs_atomic_enable(struct drm_crtc *crtc, 262 struct drm_crtc_state *old_state) 263 { 264 struct drm_device *dev = crtc->dev; 265 struct vc4_dev *vc4 = to_vc4_dev(dev); 266 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 267 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 268 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 269 bool oneshot = vc4_state->feed_txp; 270 u32 dispctrl; 271 272 vc4_hvs_update_dlist(crtc); 273 274 /* Turn on the scaler, which will wait for vstart to start 275 * compositing. 276 * When feeding the transposer, we should operate in oneshot 277 * mode. 278 */ 279 dispctrl = SCALER_DISPCTRLX_ENABLE; 280 dispctrl |= VC4_SET_FIELD(mode->hdisplay, 281 SCALER_DISPCTRLX_WIDTH) | 282 VC4_SET_FIELD(mode->vdisplay, 283 SCALER_DISPCTRLX_HEIGHT) | 284 (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0); 285 286 HVS_WRITE(SCALER_DISPCTRLX(vc4_crtc->channel), dispctrl); 287 } 288 289 void vc4_hvs_atomic_disable(struct drm_crtc *crtc, 290 struct drm_crtc_state *old_state) 291 { 292 struct drm_device *dev = crtc->dev; 293 struct vc4_dev *vc4 = to_vc4_dev(dev); 294 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 295 u32 chan = vc4_crtc->channel; 296 297 if (HVS_READ(SCALER_DISPCTRLX(chan)) & 298 SCALER_DISPCTRLX_ENABLE) { 299 HVS_WRITE(SCALER_DISPCTRLX(chan), 300 SCALER_DISPCTRLX_RESET); 301 302 /* While the docs say that reset is self-clearing, it 303 * seems it doesn't actually. 304 */ 305 HVS_WRITE(SCALER_DISPCTRLX(chan), 0); 306 } 307 308 /* Once we leave, the scaler should be disabled and its fifo empty. */ 309 310 WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); 311 312 WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)), 313 SCALER_DISPSTATX_MODE) != 314 SCALER_DISPSTATX_MODE_DISABLED); 315 316 WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) & 317 (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) != 318 SCALER_DISPSTATX_EMPTY); 319 } 320 321 void vc4_hvs_atomic_flush(struct drm_crtc *crtc, 322 struct drm_crtc_state *old_state) 323 { 324 struct drm_device *dev = crtc->dev; 325 struct vc4_dev *vc4 = to_vc4_dev(dev); 326 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 327 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 328 struct drm_plane *plane; 329 struct vc4_plane_state *vc4_plane_state; 330 bool debug_dump_regs = false; 331 bool enable_bg_fill = false; 332 u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start; 333 u32 __iomem *dlist_next = dlist_start; 334 335 if (debug_dump_regs) { 336 DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc)); 337 vc4_hvs_dump_state(dev); 338 } 339 340 /* Copy all the active planes' dlist contents to the hardware dlist. */ 341 drm_atomic_crtc_for_each_plane(plane, crtc) { 342 /* Is this the first active plane? */ 343 if (dlist_next == dlist_start) { 344 /* We need to enable background fill when a plane 345 * could be alpha blending from the background, i.e. 346 * where no other plane is underneath. It suffices to 347 * consider the first active plane here since we set 348 * needs_bg_fill such that either the first plane 349 * already needs it or all planes on top blend from 350 * the first or a lower plane. 351 */ 352 vc4_plane_state = to_vc4_plane_state(plane->state); 353 enable_bg_fill = vc4_plane_state->needs_bg_fill; 354 } 355 356 dlist_next += vc4_plane_write_dlist(plane, dlist_next); 357 } 358 359 writel(SCALER_CTL0_END, dlist_next); 360 dlist_next++; 361 362 WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); 363 364 if (enable_bg_fill) 365 /* This sets a black background color fill, as is the case 366 * with other DRM drivers. 367 */ 368 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), 369 HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) | 370 SCALER_DISPBKGND_FILL); 371 372 /* Only update DISPLIST if the CRTC was already running and is not 373 * being disabled. 374 * vc4_crtc_enable() takes care of updating the dlist just after 375 * re-enabling VBLANK interrupts and before enabling the engine. 376 * If the CRTC is being disabled, there's no point in updating this 377 * information. 378 */ 379 if (crtc->state->active && old_state->active) 380 vc4_hvs_update_dlist(crtc); 381 382 if (crtc->state->color_mgmt_changed) { 383 u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)); 384 385 if (crtc->state->gamma_lut) { 386 vc4_hvs_update_gamma_lut(crtc); 387 dispbkgndx |= SCALER_DISPBKGND_GAMMA; 388 } else { 389 /* Unsetting DISPBKGND_GAMMA skips the gamma lut step 390 * in hardware, which is the same as a linear lut that 391 * DRM expects us to use in absence of a user lut. 392 */ 393 dispbkgndx &= ~SCALER_DISPBKGND_GAMMA; 394 } 395 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), dispbkgndx); 396 } 397 398 if (debug_dump_regs) { 399 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); 400 vc4_hvs_dump_state(dev); 401 } 402 } 403 404 void vc4_hvs_mode_set_nofb(struct drm_crtc *crtc) 405 { 406 struct drm_device *dev = crtc->dev; 407 struct vc4_dev *vc4 = to_vc4_dev(dev); 408 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 409 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 410 struct drm_display_mode *mode = &crtc->state->adjusted_mode; 411 bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE; 412 413 if (vc4_crtc->data->hvs_channel == 2) { 414 u32 dispctrl; 415 u32 dsp3_mux; 416 417 /* 418 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to 419 * FIFO X'. 420 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'. 421 * 422 * DSP3 is connected to FIFO2 unless the transposer is 423 * enabled. In this case, FIFO 2 is directly accessed by the 424 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1 425 * route. 426 */ 427 if (vc4_state->feed_txp) 428 dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX); 429 else 430 dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); 431 432 dispctrl = HVS_READ(SCALER_DISPCTRL) & 433 ~SCALER_DISPCTRL_DSP3_MUX_MASK; 434 HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux); 435 } 436 437 HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel), 438 SCALER_DISPBKGND_AUTOHS | 439 SCALER_DISPBKGND_GAMMA | 440 (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); 441 442 /* Reload the LUT, since the SRAMs would have been disabled if 443 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once. 444 */ 445 vc4_hvs_lut_load(crtc); 446 } 447 448 void vc4_hvs_mask_underrun(struct drm_device *dev, int channel) 449 { 450 struct vc4_dev *vc4 = to_vc4_dev(dev); 451 u32 dispctrl = HVS_READ(SCALER_DISPCTRL); 452 453 dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel); 454 455 HVS_WRITE(SCALER_DISPCTRL, dispctrl); 456 } 457 458 void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel) 459 { 460 struct vc4_dev *vc4 = to_vc4_dev(dev); 461 u32 dispctrl = HVS_READ(SCALER_DISPCTRL); 462 463 dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel); 464 465 HVS_WRITE(SCALER_DISPSTAT, 466 SCALER_DISPSTAT_EUFLOW(channel)); 467 HVS_WRITE(SCALER_DISPCTRL, dispctrl); 468 } 469 470 static void vc4_hvs_report_underrun(struct drm_device *dev) 471 { 472 struct vc4_dev *vc4 = to_vc4_dev(dev); 473 474 atomic_inc(&vc4->underrun); 475 DRM_DEV_ERROR(dev->dev, "HVS underrun\n"); 476 } 477 478 static irqreturn_t vc4_hvs_irq_handler(int irq, void *data) 479 { 480 struct drm_device *dev = data; 481 struct vc4_dev *vc4 = to_vc4_dev(dev); 482 irqreturn_t irqret = IRQ_NONE; 483 int channel; 484 u32 control; 485 u32 status; 486 487 status = HVS_READ(SCALER_DISPSTAT); 488 control = HVS_READ(SCALER_DISPCTRL); 489 490 for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) { 491 /* Interrupt masking is not always honored, so check it here. */ 492 if (status & SCALER_DISPSTAT_EUFLOW(channel) && 493 control & SCALER_DISPCTRL_DSPEISLUR(channel)) { 494 vc4_hvs_mask_underrun(dev, channel); 495 vc4_hvs_report_underrun(dev); 496 497 irqret = IRQ_HANDLED; 498 } 499 } 500 501 /* Clear every per-channel interrupt flag. */ 502 HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) | 503 SCALER_DISPSTAT_IRQMASK(1) | 504 SCALER_DISPSTAT_IRQMASK(2)); 505 506 return irqret; 507 } 508 509 static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) 510 { 511 struct platform_device *pdev = to_platform_device(dev); 512 struct drm_device *drm = dev_get_drvdata(master); 513 struct vc4_dev *vc4 = drm->dev_private; 514 struct vc4_hvs *hvs = NULL; 515 int ret; 516 u32 dispctrl; 517 518 hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL); 519 if (!hvs) 520 return -ENOMEM; 521 522 hvs->pdev = pdev; 523 524 hvs->regs = vc4_ioremap_regs(pdev, 0); 525 if (IS_ERR(hvs->regs)) 526 return PTR_ERR(hvs->regs); 527 528 hvs->regset.base = hvs->regs; 529 hvs->regset.regs = hvs_regs; 530 hvs->regset.nregs = ARRAY_SIZE(hvs_regs); 531 532 hvs->dlist = hvs->regs + SCALER_DLIST_START; 533 534 spin_lock_init(&hvs->mm_lock); 535 536 /* Set up the HVS display list memory manager. We never 537 * overwrite the setup from the bootloader (just 128b out of 538 * our 16K), since we don't want to scramble the screen when 539 * transitioning from the firmware's boot setup to runtime. 540 */ 541 drm_mm_init(&hvs->dlist_mm, 542 HVS_BOOTLOADER_DLIST_END, 543 (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); 544 545 /* Set up the HVS LBM memory manager. We could have some more 546 * complicated data structure that allowed reuse of LBM areas 547 * between planes when they don't overlap on the screen, but 548 * for now we just allocate globally. 549 */ 550 drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); 551 552 /* Upload filter kernels. We only have the one for now, so we 553 * keep it around for the lifetime of the driver. 554 */ 555 ret = vc4_hvs_upload_linear_kernel(hvs, 556 &hvs->mitchell_netravali_filter, 557 mitchell_netravali_1_3_1_3_kernel); 558 if (ret) 559 return ret; 560 561 vc4->hvs = hvs; 562 563 dispctrl = HVS_READ(SCALER_DISPCTRL); 564 565 dispctrl |= SCALER_DISPCTRL_ENABLE; 566 dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | 567 SCALER_DISPCTRL_DISPEIRQ(1) | 568 SCALER_DISPCTRL_DISPEIRQ(2); 569 570 /* Set DSP3 (PV1) to use HVS channel 2, which would otherwise 571 * be unused. 572 */ 573 dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK; 574 dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | 575 SCALER_DISPCTRL_SLVWREIRQ | 576 SCALER_DISPCTRL_SLVRDEIRQ | 577 SCALER_DISPCTRL_DSPEIEOF(0) | 578 SCALER_DISPCTRL_DSPEIEOF(1) | 579 SCALER_DISPCTRL_DSPEIEOF(2) | 580 SCALER_DISPCTRL_DSPEIEOLN(0) | 581 SCALER_DISPCTRL_DSPEIEOLN(1) | 582 SCALER_DISPCTRL_DSPEIEOLN(2) | 583 SCALER_DISPCTRL_DSPEISLUR(0) | 584 SCALER_DISPCTRL_DSPEISLUR(1) | 585 SCALER_DISPCTRL_DSPEISLUR(2) | 586 SCALER_DISPCTRL_SCLEIRQ); 587 dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX); 588 589 HVS_WRITE(SCALER_DISPCTRL, dispctrl); 590 591 ret = devm_request_irq(dev, platform_get_irq(pdev, 0), 592 vc4_hvs_irq_handler, 0, "vc4 hvs", drm); 593 if (ret) 594 return ret; 595 596 vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset); 597 vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, 598 NULL); 599 600 return 0; 601 } 602 603 static void vc4_hvs_unbind(struct device *dev, struct device *master, 604 void *data) 605 { 606 struct drm_device *drm = dev_get_drvdata(master); 607 struct vc4_dev *vc4 = drm->dev_private; 608 609 if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter)) 610 drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter); 611 612 drm_mm_takedown(&vc4->hvs->dlist_mm); 613 drm_mm_takedown(&vc4->hvs->lbm_mm); 614 615 vc4->hvs = NULL; 616 } 617 618 static const struct component_ops vc4_hvs_ops = { 619 .bind = vc4_hvs_bind, 620 .unbind = vc4_hvs_unbind, 621 }; 622 623 static int vc4_hvs_dev_probe(struct platform_device *pdev) 624 { 625 return component_add(&pdev->dev, &vc4_hvs_ops); 626 } 627 628 static int vc4_hvs_dev_remove(struct platform_device *pdev) 629 { 630 component_del(&pdev->dev, &vc4_hvs_ops); 631 return 0; 632 } 633 634 static const struct of_device_id vc4_hvs_dt_match[] = { 635 { .compatible = "brcm,bcm2835-hvs" }, 636 {} 637 }; 638 639 struct platform_driver vc4_hvs_driver = { 640 .probe = vc4_hvs_dev_probe, 641 .remove = vc4_hvs_dev_remove, 642 .driver = { 643 .name = "vc4_hvs", 644 .of_match_table = vc4_hvs_dt_match, 645 }, 646 }; 647