// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 plane module
 *
 * Each DRM plane is a layer of pixels being scanned out by the HVS.
 *
 * At atomic modeset check time, we compute the HVS display element
 * state that would be necessary for displaying the plane (giving us a
 * chance to figure out if a plane configuration is invalid), then at
 * atomic flush time the CRTC will ask us to write our element state
 * into the region of the HVS that it has allocated for us.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_uapi.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct hvs_format {
	u32 drm; /* DRM_FORMAT_* */
	u32 hvs; /* HVS_FORMAT_* */
	u32 pixel_order;
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV21,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV61,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
};

static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		if (hvs_formats[i].drm == drm_format)
			return &hvs_formats[i];
	}

	return NULL;
}

static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
	if (dst == src)
		return VC4_SCALING_NONE;
	if (3 * dst >= 2 * src)
		return VC4_SCALING_PPF;
	else
		return VC4_SCALING_TPZ;
}
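
/*
 * Illustrative data points for vc4_get_scaling_mode() (derived from the
 * checks above, not from the spec): 100 -> 100 source-to-destination pixels
 * is VC4_SCALING_NONE, 100 -> 80 still satisfies 3 * dst >= 2 * src and uses
 * the PPF (polyphase) scaler, while 100 -> 40 falls below that ratio and
 * uses the TPZ (trapezoidal) scaler. Upscaling always satisfies the PPF
 * condition.
 */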

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->fb && state->crtc;
}

static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	if (WARN_ON(!plane->state))
		return NULL;

	vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return NULL;

	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
	vc4_state->dlist_initialized = 0;

	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

	if (vc4_state->dlist) {
		vc4_state->dlist = kmemdup(vc4_state->dlist,
					   vc4_state->dlist_count * 4,
					   GFP_KERNEL);
		if (!vc4_state->dlist) {
			kfree(vc4_state);
			return NULL;
		}
		vc4_state->dlist_size = vc4_state->dlist_count;
	}

	return &vc4_state->base;
}

static void vc4_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	if (vc4_state->lbm.allocated) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}

	kfree(vc4_state->dlist);
	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
	kfree(state);
}

/* Called during init to allocate the plane's atomic state. */
static void vc4_plane_reset(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	WARN_ON(plane->state);

	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return;

	__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}

static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
	if (vc4_state->dlist_count == vc4_state->dlist_size) {
		u32 new_size = max(4u, vc4_state->dlist_count * 2);
		u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);

		if (!new_dlist)
			return;
		memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);

		kfree(vc4_state->dlist);
		vc4_state->dlist = new_dlist;
		vc4_state->dlist_size = new_size;
	}

	vc4_state->dlist[vc4_state->dlist_count++] = val;
}

/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_PPF_V_PPF;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_PPF_V_NONE;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_NONE_V_PPF;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
	default:
	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY.
		 */
		return 0;
	}
}

static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
{
	struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate);
	unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay;
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(pstate->state,
						   pstate->crtc);

	vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom);
	if (!left && !right && !top && !bottom)
		return 0;

	if (left + right >= crtc_state->mode.hdisplay ||
	    top + bottom >= crtc_state->mode.vdisplay)
		return -EINVAL;

	adjhdisplay = crtc_state->mode.hdisplay - (left + right);
	vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x *
					       adjhdisplay,
					       crtc_state->mode.hdisplay);
	vc4_pstate->crtc_x += left;
	if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left)
		vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left;

	adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
	vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
					       adjvdisplay,
					       crtc_state->mode.vdisplay);
	vc4_pstate->crtc_y += top;
	if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top)
		vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top;

	vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
					       adjhdisplay,
					       crtc_state->mode.hdisplay);
	vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h *
					       adjvdisplay,
					       crtc_state->mode.vdisplay);

	if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h)
		return -EINVAL;

	return 0;
}
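
/*
 * Worked example for vc4_plane_margins_adj() (illustrative numbers only):
 * on a 1920x1080 mode with 32-pixel left/right margins, adjhdisplay is
 * 1856, so a plane covering the right half of the screen (crtc_x = 960,
 * crtc_w = 960) becomes crtc_x = 960 * 1856 / 1920 + 32 = 960 and
 * crtc_w = 928, i.e. it is squeezed proportionally into the
 * margin-adjusted area.
 */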

static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	u32 subpixel_src_mask = (1 << 16) - 1;
	int num_planes = fb->format->num_planes;
	struct drm_crtc_state *crtc_state;
	u32 h_subsample = fb->format->hsub;
	u32 v_subsample = fb->format->vsub;
	int i, ret;

	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	if (!crtc_state) {
		DRM_DEBUG_KMS("Invalid crtc state\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
						  INT_MAX, true, true);
	if (ret)
		return ret;

	for (i = 0; i < num_planes; i++)
		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];

	/* We don't support subpixel source positioning for scaling. */
	if ((state->src.x1 & subpixel_src_mask) ||
	    (state->src.x2 & subpixel_src_mask) ||
	    (state->src.y1 & subpixel_src_mask) ||
	    (state->src.y2 & subpixel_src_mask)) {
		return -EINVAL;
	}

	vc4_state->src_x = state->src.x1 >> 16;
	vc4_state->src_y = state->src.y1 >> 16;
	vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
	vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;

	vc4_state->crtc_x = state->dst.x1;
	vc4_state->crtc_y = state->dst.y1;
	vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
	vc4_state->crtc_h = state->dst.y2 - state->dst.y1;

	ret = vc4_plane_margins_adj(state);
	if (ret)
		return ret;

	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
						       vc4_state->crtc_w);
	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
						       vc4_state->crtc_h);

	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[0] == VC4_SCALING_NONE);

	if (num_planes > 1) {
		vc4_state->is_yuv = true;

		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;

		vc4_state->x_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_w[1],
					     vc4_state->crtc_w);
		vc4_state->y_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_h[1],
					     vc4_state->crtc_h);

		/* YUV conversion requires that horizontal scaling be enabled
		 * on the UV plane even if vc4_get_scaling_mode() returned
		 * VC4_SCALING_NONE (which can happen when the down-scaling
		 * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
		 * case.
		 */
		if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
			vc4_state->x_scaling[1] = VC4_SCALING_PPF;
	} else {
		vc4_state->is_yuv = false;
		vc4_state->x_scaling[1] = VC4_SCALING_NONE;
		vc4_state->y_scaling[1] = VC4_SCALING_NONE;
	}

	return 0;
}

static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale, recip;

	scale = (1 << 16) * src / dst;

	/* The specs note that while the reciprocal would be defined
	 * as (1<<32)/scale, ~0 is close enough.
	 */
	recip = ~0 / scale;

	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}

static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale = (1 << 16) * src / dst;

	vc4_dlist_write(vc4_state,
			SCALER_PPF_AGC |
			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}
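
/*
 * Example of the scale factors written by vc4_write_tpz()/vc4_write_ppf()
 * (illustrative only): the scale value is the source step per destination
 * pixel in 16.16 fixed point, so downscaling 1920 source pixels to 1280
 * destination pixels gives scale = (1 << 16) * 1920 / 1280 = 0x18000
 * (1.5 source pixels per output pixel), and the TPZ reciprocal is
 * approximated as ~0 / 0x18000 = 0xaaaa (~1/1.5 in 0.16 fixed point).
 */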

static u32 vc4_lbm_size(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	/* This is the worst case number. One of the two sizes will
	 * be used depending on the scaling configuration.
	 */
	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
	u32 lbm;

	/* LBM is not needed when there's no vertical scaling. */
	if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
	    vc4_state->y_scaling[1] == VC4_SCALING_NONE)
		return 0;

	if (!vc4_state->is_yuv) {
		if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
			lbm = pix_per_line * 8;
		else {
			/* In special cases, this multiplier might be 12. */
			lbm = pix_per_line * 16;
		}
	} else {
		/* There are cases for this going down to a multiplier
		 * of 2, but according to the firmware source, the
		 * table in the docs is somewhat wrong.
		 */
		lbm = pix_per_line * 16;
	}

	lbm = roundup(lbm, 32);

	return lbm;
}
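
/*
 * Example for vc4_lbm_size() (illustrative only): an RGB plane 1920 pixels
 * wide that is vertically scaled with the PPF filter reserves an LBM
 * allocation of roundup(1920 * 16, 32) = 30720, while TPZ scaling of the
 * same source only needs 1920 * 8 = 15360.
 */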

static void vc4_write_scaling_parameters(struct drm_plane_state *state,
					 int channel)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	/* Ch0 H-PPF Word 0: Scaling Parameters */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}

	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}
}

static void vc4_plane_calc_load(struct drm_plane_state *state)
{
	unsigned int hvs_load_shift, vrefresh, i;
	struct drm_framebuffer *fb = state->fb;
	struct vc4_plane_state *vc4_state;
	struct drm_crtc_state *crtc_state;
	unsigned int vscale_factor;

	vc4_state = to_vc4_plane_state(state);
	crtc_state = drm_atomic_get_existing_crtc_state(state->state,
							state->crtc);
	vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);

	/* The HVS is able to process 2 pixels/cycle when scaling the source,
	 * 4 pixels/cycle otherwise.
	 * The alpha blending step appears to be pipelined and always operates
	 * at 4 pixels/cycle, so the limiting aspect here seems to be the
	 * scaler block.
	 * HVS load is expressed in clk-cycles/sec (AKA Hz).
	 */
	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
	    vc4_state->y_scaling[1] != VC4_SCALING_NONE)
		hvs_load_shift = 1;
	else
		hvs_load_shift = 2;

	vc4_state->membus_load = 0;
	vc4_state->hvs_load = 0;
	for (i = 0; i < fb->format->num_planes; i++) {
		/* Even though the bandwidth/plane required for a single frame
		 * is
		 *
		 * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
		 *
		 * when downscaling, we have to read more pixels per line in
		 * the time frame reserved for a single line, so the bandwidth
		 * demand can be momentarily higher. To account for that, we
		 * calculate the down-scaling factor and multiply the plane
		 * load by this number. We're likely over-estimating the read
		 * demand, but that's better than under-estimating it.
		 */
		vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
					     vc4_state->crtc_h);
		vc4_state->membus_load += vc4_state->src_w[i] *
					  vc4_state->src_h[i] * vscale_factor *
					  fb->format->cpp[i];
		vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
	}

	vc4_state->hvs_load *= vrefresh;
	vc4_state->hvs_load >>= hvs_load_shift;
	vc4_state->membus_load *= vrefresh;
}
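
/*
 * Worked example for vc4_plane_calc_load() (purely illustrative numbers):
 * an XRGB8888 plane downscaled from 1920x1080 to 1920x540 at 60Hz gives
 * vscale_factor = DIV_ROUND_UP(1080, 540) = 2, so
 * membus_load = 1920 * 1080 * 2 * 4 * 60, roughly 1 GB/s, and, since the
 * scaler limits us to 2 pixels/cycle, hvs_load = 1920 * 540 * 60 / 2,
 * about 31 MHz worth of HVS cycles.
 */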

static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	unsigned long irqflags;
	u32 lbm_size;

	lbm_size = vc4_lbm_size(state);
	if (!lbm_size)
		return 0;

	if (WARN_ON(!vc4_state->lbm_offset))
		return -EINVAL;

	/* Allocate the LBM memory that the HVS will use for temporary
	 * storage due to our scaling/format conversion.
	 */
	if (!vc4_state->lbm.allocated) {
		int ret;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
						 &vc4_state->lbm,
						 lbm_size, 32, 0, 0);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);

		if (ret)
			return ret;
	} else {
		WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
	}

	vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;

	return 0;
}

/* Writes out a full display list for an active plane to the plane's
 * private dlist state.
 */
static int vc4_plane_mode_set(struct drm_plane *plane,
			      struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	u32 ctl0_offset = vc4_state->dlist_count;
	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
	u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
	int num_planes = fb->format->num_planes;
	u32 h_subsample = fb->format->hsub;
	u32 v_subsample = fb->format->vsub;
	bool mix_plane_alpha;
	bool covers_screen;
	u32 scl0, scl1, pitch0;
	u32 tiling, src_y;
	u32 hvs_format = format->hvs;
	unsigned int rotation;
	int ret, i;

	if (vc4_state->dlist_initialized)
		return 0;

	ret = vc4_plane_setup_clipping_and_scaling(state);
	if (ret)
		return ret;

	/* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
	 * and 4:4:4, scl1 should be set to scl0 so both channels of
	 * the scaler do the same thing. For YUV, the Y plane needs
	 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
	 * the scl fields here.
	 */
	if (num_planes == 1) {
		scl0 = vc4_get_scl_field(state, 0);
		scl1 = scl0;
	} else {
		scl0 = vc4_get_scl_field(state, 1);
		scl1 = vc4_get_scl_field(state, 0);
	}

	rotation = drm_rotation_simplify(state->rotation,
					 DRM_MODE_ROTATE_0 |
					 DRM_MODE_REFLECT_X |
					 DRM_MODE_REFLECT_Y);

	/* We must point to the last line when Y reflection is enabled. */
	src_y = vc4_state->src_y;
	if (rotation & DRM_MODE_REFLECT_Y)
		src_y += vc4_state->src_h[0] - 1;

	switch (base_format_mod) {
	case DRM_FORMAT_MOD_LINEAR:
		tiling = SCALER_CTL0_TILING_LINEAR;
		pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);

		/* Adjust the base pointer to the first pixel to be scanned
		 * out.
		 */
		for (i = 0; i < num_planes; i++) {
			vc4_state->offsets[i] += src_y /
						 (i ? v_subsample : 1) *
						 fb->pitches[i];

			vc4_state->offsets[i] += vc4_state->src_x /
						 (i ? h_subsample : 1) *
						 fb->format->cpp[i];
		}

		break;

	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
		u32 tile_size_shift = 12; /* T tiles are 4kb */
		/* Whole-tile offsets, mostly for setting the pitch. */
		u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
		u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
		u32 tile_w_mask = (1 << tile_w_shift) - 1;
		/* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
		 * the height (in pixels) of a 4k tile.
		 */
		u32 tile_h_mask = (2 << tile_h_shift) - 1;
		/* For T-tiled, the FB pitch is "how many bytes from one row
		 * of tiles to the next", such that
		 *
		 *	pitch * tile_h == tile_size * tiles_per_row
		 */
		u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
		u32 tiles_l = vc4_state->src_x >> tile_w_shift;
		u32 tiles_r = tiles_w - tiles_l;
		u32 tiles_t = src_y >> tile_h_shift;
		/* Intra-tile offsets, which modify the base address (the
		 * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
		 * base address).
		 */
		u32 tile_y = (src_y >> 4) & 1;
		u32 subtile_y = (src_y >> 2) & 3;
		u32 utile_y = src_y & 3;
		u32 x_off = vc4_state->src_x & tile_w_mask;
		u32 y_off = src_y & tile_h_mask;

		/* When Y reflection is requested we must set the
		 * SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines
		 * after the initial one should be fetched in descending order,
		 * which makes sense since we start from the last line and go
		 * backward.
		 * Don't know why we need y_off = max_y_off - y_off, but it's
		 * definitely required (I guess it's also related to the "going
		 * backward" situation).
		 */
		if (rotation & DRM_MODE_REFLECT_Y) {
			y_off = tile_h_mask - y_off;
			pitch0 = SCALER_PITCH0_TILE_LINE_DIR;
		} else {
			pitch0 = 0;
		}

		tiling = SCALER_CTL0_TILING_256B_OR_T;
		pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
			   VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
			   VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
			   VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
		vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
		vc4_state->offsets[0] += subtile_y << 8;
		vc4_state->offsets[0] += utile_y << 4;
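
		/* Illustration of the T-tile arithmetic above (example
		 * values, assuming 32bpp and no Y reflection): tiles are
		 * 32x32 pixels (4kb), so src_x = 100, src_y = 50 gives
		 * tiles_l = 3, x_off = 4, tiles_t = 1, y_off = 50,
		 * tile_y = 1, subtile_y = 0 and utile_y = 2.
		 */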

		/* Rows of tiles alternate left-to-right and right-to-left. */
		if (tiles_t & 1) {
			pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
			vc4_state->offsets[0] += (tiles_w - tiles_l) <<
						 tile_size_shift;
			vc4_state->offsets[0] -= (1 + !tile_y) << 10;
		} else {
			vc4_state->offsets[0] += tiles_l << tile_size_shift;
			vc4_state->offsets[0] += tile_y << 10;
		}

		break;
	}

	case DRM_FORMAT_MOD_BROADCOM_SAND64:
	case DRM_FORMAT_MOD_BROADCOM_SAND128:
	case DRM_FORMAT_MOD_BROADCOM_SAND256: {
		uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
		u32 tile_w, tile, x_off, pix_per_tile;

		hvs_format = HVS_PIXEL_FORMAT_H264;

		switch (base_format_mod) {
		case DRM_FORMAT_MOD_BROADCOM_SAND64:
			tiling = SCALER_CTL0_TILING_64B;
			tile_w = 64;
			break;
		case DRM_FORMAT_MOD_BROADCOM_SAND128:
			tiling = SCALER_CTL0_TILING_128B;
			tile_w = 128;
			break;
		case DRM_FORMAT_MOD_BROADCOM_SAND256:
			tiling = SCALER_CTL0_TILING_256B_OR_T;
			tile_w = 256;
			break;
		default:
			break;
		}

		if (param > SCALER_TILE_HEIGHT_MASK) {
			DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
			return -EINVAL;
		}

		pix_per_tile = tile_w / fb->format->cpp[0];
		tile = vc4_state->src_x / pix_per_tile;
		x_off = vc4_state->src_x % pix_per_tile;
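
		/* For instance (illustrative only), an NV12 buffer with the
		 * SAND128 modifier has cpp[0] = 1 and pix_per_tile = 128, so
		 * src_x = 300 starts in column tile = 2 at x_off = 44; the
		 * base pointers below then skip param * tile_w bytes per
		 * column.
		 */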

		/* Adjust the base pointer to the first pixel to be scanned
		 * out.
		 */
		for (i = 0; i < num_planes; i++) {
			vc4_state->offsets[i] += param * tile_w * tile;
			vc4_state->offsets[i] += src_y /
						 (i ? v_subsample : 1) *
						 tile_w;
			vc4_state->offsets[i] += x_off /
						 (i ? h_subsample : 1) *
						 fb->format->cpp[i];
		}

		pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
		break;
	}

	default:
		DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
			      (long long)fb->modifier);
		return -EINVAL;
	}

	/* Control word */
	vc4_dlist_write(vc4_state,
			SCALER_CTL0_VALID |
			(rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
			(rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
			VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
			(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
			(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
			VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
			(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
			VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
			VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));

	/* Position Word 0: Image Positions and Alpha Value */
	vc4_state->pos0_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
			VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
			VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));

	/* Position Word 1: Scaled Image Dimensions. */
	if (!vc4_state->is_unity) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(vc4_state->crtc_w,
					      SCALER_POS1_SCL_WIDTH) |
				VC4_SET_FIELD(vc4_state->crtc_h,
					      SCALER_POS1_SCL_HEIGHT));
	}

	/* Don't waste cycles mixing with plane alpha if the set alpha
	 * is opaque or there is no per-pixel alpha information.
	 * In any case we use the alpha property value as the fixed alpha.
	 */
	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
			  fb->format->has_alpha;

	/* Position Word 2: Source Image Size, Alpha */
	vc4_state->pos2_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(fb->format->has_alpha ?
				      SCALER_POS2_ALPHA_MODE_PIPELINE :
				      SCALER_POS2_ALPHA_MODE_FIXED,
				      SCALER_POS2_ALPHA_MODE) |
			(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
			(fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
			VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
			VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));

	/* Position Word 3: Context. Written by the HVS. */
	vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
	 *
	 * The pointers may be any byte address.
	 */
	vc4_state->ptr0_offset = vc4_state->dlist_count;
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, vc4_state->offsets[i]);

	/* Pointer Context Word 0/1/2: Written by the HVS */
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pitch word 0 */
	vc4_dlist_write(vc4_state, pitch0);

	/* Pitch word 1/2 */
	for (i = 1; i < num_planes; i++) {
		if (hvs_format != HVS_PIXEL_FORMAT_H264) {
			vc4_dlist_write(vc4_state,
					VC4_SET_FIELD(fb->pitches[i],
						      SCALER_SRC_PITCH));
		} else {
			vc4_dlist_write(vc4_state, pitch0);
		}
	}

	/* Colorspace conversion words */
	if (vc4_state->is_yuv) {
		vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
	}

	vc4_state->lbm_offset = 0;

	if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
	    vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
	    vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
		/* Reserve a slot for the LBM Base Address. The real value will
		 * be set when calling vc4_plane_allocate_lbm().
		 */
		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
		    vc4_state->y_scaling[1] != VC4_SCALING_NONE)
			vc4_state->lbm_offset = vc4_state->dlist_count++;

		if (num_planes > 1) {
			/* Emit Cb/Cr as channel 0 and Y as channel
			 * 1. This matches how we set up scl0/scl1
			 * above.
			 */
			vc4_write_scaling_parameters(state, 1);
		}
		vc4_write_scaling_parameters(state, 0);

		/* If any PPF setup was done, then all the kernel
		 * pointers get uploaded.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
			u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
						   SCALER_PPF_KERNEL_OFFSET);

			/* HPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* HPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
		}
	}

	vc4_state->dlist[ctl0_offset] |=
		VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);

	/* crtc_* are already clipped coordinates. */
	covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
			vc4_state->crtc_w == state->crtc->mode.hdisplay &&
			vc4_state->crtc_h == state->crtc->mode.vdisplay;
	/* Background fill might be necessary when the plane has per-pixel
	 * alpha content or a non-opaque plane alpha and could blend from the
	 * background or does not cover the entire screen.
	 */
	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;

	/* Flag the dlist as initialized to avoid checking it twice in case
	 * the async update check already called vc4_plane_mode_set() and
	 * decided to fall back to a sync update because an async update was
	 * not possible.
	 */
	vc4_state->dlist_initialized = 1;

	vc4_plane_calc_load(state);

	return 0;
}

/* If a modeset involves changing the setup of a plane, the atomic
 * infrastructure will call this to validate a proposed plane setup.
 * However, if a plane isn't getting updated, this (and the
 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
 * compute the dlist here and have all active plane dlists get updated
 * in the CRTC's flush.
 */
static int vc4_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	int ret;

	vc4_state->dlist_count = 0;

	if (!plane_enabled(state))
		return 0;

	ret = vc4_plane_mode_set(plane, state);
	if (ret)
		return ret;

	return vc4_plane_allocate_lbm(state);
}

static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* No contents here. Since we don't know where in the CRTC's
	 * dlist we should be stored, our dlist is uploaded to the
	 * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
	 * time.
	 */
}

u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	int i;

	vc4_state->hw_dlist = dlist;

	/* Can't memcpy_toio() because it needs to be 32-bit writes. */
	for (i = 0; i < vc4_state->dlist_count; i++)
		writel(vc4_state->dlist[i], &dlist[i]);

	return vc4_state->dlist_count;
}

u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
	const struct vc4_plane_state *vc4_state =
		container_of(state, typeof(*vc4_state), base);

	return vc4_state->dlist_count;
}

/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	uint32_t addr;

	/* We're skipping the address adjustment for negative origin,
	 * because this is only called on the primary plane.
	 */
	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
	addr = bo->paddr + fb->offsets[0];

	/* Write the new address into the hardware immediately. The
	 * scanout will start from this address as soon as the FIFO
	 * needs to refill with pixels.
	 */
	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	/* Also update the CPU-side dlist copy, so that any later
	 * atomic updates that don't do a new modeset on our plane
	 * also use our updated address.
	 */
	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}

static void vc4_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state, *new_vc4_state;

	swap(plane->state->fb, state->fb);
	plane->state->crtc_x = state->crtc_x;
	plane->state->crtc_y = state->crtc_y;
	plane->state->crtc_w = state->crtc_w;
	plane->state->crtc_h = state->crtc_h;
	plane->state->src_x = state->src_x;
	plane->state->src_y = state->src_y;
	plane->state->src_w = state->src_w;
	plane->state->src_h = state->src_h;
	plane->state->alpha = state->alpha;
	plane->state->pixel_blend_mode = state->pixel_blend_mode;
	plane->state->rotation = state->rotation;
	plane->state->zpos = state->zpos;
	plane->state->normalized_zpos = state->normalized_zpos;
	plane->state->color_encoding = state->color_encoding;
	plane->state->color_range = state->color_range;
	plane->state->src = state->src;
	plane->state->dst = state->dst;
	plane->state->visible = state->visible;

	new_vc4_state = to_vc4_plane_state(state);
	vc4_state = to_vc4_plane_state(plane->state);

	vc4_state->crtc_x = new_vc4_state->crtc_x;
	vc4_state->crtc_y = new_vc4_state->crtc_y;
	vc4_state->crtc_h = new_vc4_state->crtc_h;
	vc4_state->crtc_w = new_vc4_state->crtc_w;
	vc4_state->src_x = new_vc4_state->src_x;
	vc4_state->src_y = new_vc4_state->src_y;
	memcpy(vc4_state->src_w, new_vc4_state->src_w,
	       sizeof(vc4_state->src_w));
	memcpy(vc4_state->src_h, new_vc4_state->src_h,
	       sizeof(vc4_state->src_h));
	memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
	       sizeof(vc4_state->x_scaling));
	memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
	       sizeof(vc4_state->y_scaling));
	vc4_state->is_unity = new_vc4_state->is_unity;
	vc4_state->is_yuv = new_vc4_state->is_yuv;
	memcpy(vc4_state->offsets, new_vc4_state->offsets,
	       sizeof(vc4_state->offsets));
	vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;

	/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
	vc4_state->dlist[vc4_state->pos0_offset] =
		new_vc4_state->dlist[vc4_state->pos0_offset];
	vc4_state->dlist[vc4_state->pos2_offset] =
		new_vc4_state->dlist[vc4_state->pos2_offset];
	vc4_state->dlist[vc4_state->ptr0_offset] =
		new_vc4_state->dlist[vc4_state->ptr0_offset];

	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
}

static int vc4_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct vc4_plane_state *old_vc4_state, *new_vc4_state;
	int ret;
	u32 i;

	ret = vc4_plane_mode_set(plane, state);
	if (ret)
		return ret;

	old_vc4_state = to_vc4_plane_state(plane->state);
	new_vc4_state = to_vc4_plane_state(state);
	if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
	    old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
	    old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
	    old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
	    vc4_lbm_size(plane->state) != vc4_lbm_size(state))
		return -EINVAL;

	/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update;
	 * if anything else has changed, fall back to a sync update.
	 */
	for (i = 0; i < new_vc4_state->dlist_count; i++) {
		if (i == new_vc4_state->pos0_offset ||
		    i == new_vc4_state->pos2_offset ||
		    i == new_vc4_state->ptr0_offset ||
		    (new_vc4_state->lbm_offset &&
		     i == new_vc4_state->lbm_offset))
			continue;

		if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
			return -EINVAL;
	}

	return 0;
}

static int vc4_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct vc4_bo *bo;
	struct dma_fence *fence;
	int ret;

	if (!state->fb)
		return 0;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);

	fence = reservation_object_get_excl_rcu(bo->base.base.resv);
	drm_atomic_set_fence_for_plane(state, fence);

	if (plane->state->fb == state->fb)
		return 0;

	ret = vc4_bo_inc_usecnt(bo);
	if (ret)
		return ret;

	return 0;
}

static void vc4_cleanup_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vc4_bo *bo;

	if (plane->state->fb == state->fb || !state->fb)
		return;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
	vc4_bo_dec_usecnt(bo);
}

static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
	.cleanup_fb = vc4_cleanup_fb,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};

static void vc4_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}

static bool vc4_format_mod_supported(struct drm_plane *plane,
				     uint32_t format,
				     uint64_t modifier)
{
	/* Support T_TILING for RGB formats only. */
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_XRGB1555:
		switch (fourcc_mod_broadcom_mod(modifier)) {
		case DRM_FORMAT_MOD_LINEAR:
		case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
			return true;
		default:
			return false;
		}
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		switch (fourcc_mod_broadcom_mod(modifier)) {
		case DRM_FORMAT_MOD_LINEAR:
		case DRM_FORMAT_MOD_BROADCOM_SAND64:
		case DRM_FORMAT_MOD_BROADCOM_SAND128:
		case DRM_FORMAT_MOD_BROADCOM_SAND256:
			return true;
		default:
			return false;
		}
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	default:
		return (modifier == DRM_FORMAT_MOD_LINEAR);
	}
}

static const struct drm_plane_funcs vc4_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vc4_plane_destroy,
	.set_property = NULL,
	.reset = vc4_plane_reset,
	.atomic_duplicate_state = vc4_plane_duplicate_state,
	.atomic_destroy_state = vc4_plane_destroy_state,
	.format_mod_supported = vc4_format_mod_supported,
};

struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct vc4_plane *vc4_plane;
	u32 formats[ARRAY_SIZE(hvs_formats)];
	int ret = 0;
	unsigned i;
	static const uint64_t modifiers[] = {
		DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
		DRM_FORMAT_MOD_BROADCOM_SAND128,
		DRM_FORMAT_MOD_BROADCOM_SAND64,
		DRM_FORMAT_MOD_BROADCOM_SAND256,
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID
	};

	vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
				 GFP_KERNEL);
	if (!vc4_plane)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
		formats[i] = hvs_formats[i].drm;

	plane = &vc4_plane->base;
	ret = drm_universal_plane_init(dev, plane, 0,
				       &vc4_plane_funcs,
				       formats, ARRAY_SIZE(formats),
				       modifiers, type, NULL);
	if (ret)
		return ERR_PTR(ret);

	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

	drm_plane_create_alpha_property(plane);
	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
					   DRM_MODE_ROTATE_0 |
					   DRM_MODE_ROTATE_180 |
					   DRM_MODE_REFLECT_X |
					   DRM_MODE_REFLECT_Y);

	return plane;
}