/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 plane module
 *
 * Each DRM plane is a layer of pixels being scanned out by the HVS.
 *
 * At atomic modeset check time, we compute the HVS display element
 * state that would be necessary for displaying the plane (giving us a
 * chance to figure out if a plane configuration is invalid), then at
 * atomic flush time the CRTC will ask us to write our element state
 * into the region of the HVS that it has allocated for us.
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct hvs_format {
	u32 drm; /* DRM_FORMAT_* */
	u32 hvs; /* HVS_FORMAT_* */
	u32 pixel_order;
} hvs_formats[] = {
	{
		.drm = DRM_FORMAT_XRGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
		.pixel_order = HVS_PIXEL_ORDER_ARGB,
	},
	{
		.drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR565, .hvs = HVS_PIXEL_FORMAT_RGB565,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_ARGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_XRGB1555, .hvs = HVS_PIXEL_FORMAT_RGBA5551,
		.pixel_order = HVS_PIXEL_ORDER_ABGR,
	},
	{
		.drm = DRM_FORMAT_RGB888, .hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XRGB,
	},
	{
		.drm = DRM_FORMAT_BGR888, .hvs = HVS_PIXEL_FORMAT_RGB888,
		.pixel_order = HVS_PIXEL_ORDER_XBGR,
	},
	{
		.drm = DRM_FORMAT_YUV422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU422,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_YUV420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_YVU420,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV12,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV21,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
	{
		.drm = DRM_FORMAT_NV16,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
	},
	{
		.drm = DRM_FORMAT_NV61,
		.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
		.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
	},
};

static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		if (hvs_formats[i].drm == drm_format)
			return &hvs_formats[i];
	}

	return NULL;
}

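/* Picks the scaling mode for one axis: the polyphase filter (PPF) is
 * used for upscaling, TPZ for downscaling, and no scaling when the
 * source and destination sizes match.
 */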
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
	if (dst > src)
		return VC4_SCALING_PPF;
	else if (dst < src)
		return VC4_SCALING_TPZ;
	else
		return VC4_SCALING_NONE;
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->fb && state->crtc;
}

static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	if (WARN_ON(!plane->state))
		return NULL;

	vc4_state = kmemdup(plane->state, sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return NULL;

	memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));

	__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);

	if (vc4_state->dlist) {
		vc4_state->dlist = kmemdup(vc4_state->dlist,
					   vc4_state->dlist_count * 4,
					   GFP_KERNEL);
		if (!vc4_state->dlist) {
			kfree(vc4_state);
			return NULL;
		}
		vc4_state->dlist_size = vc4_state->dlist_count;
	}

	return &vc4_state->base;
}

static void vc4_plane_destroy_state(struct drm_plane *plane,
				    struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	if (vc4_state->lbm.allocated) {
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
		drm_mm_remove_node(&vc4_state->lbm);
		spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
	}

	kfree(vc4_state->dlist);
	__drm_atomic_helper_plane_destroy_state(&vc4_state->base);
	kfree(state);
}

/* Called during init to allocate the plane's atomic state. */
static void vc4_plane_reset(struct drm_plane *plane)
{
	struct vc4_plane_state *vc4_state;

	WARN_ON(plane->state);

	vc4_state = kzalloc(sizeof(*vc4_state), GFP_KERNEL);
	if (!vc4_state)
		return;

	plane->state = &vc4_state->base;
	plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
	vc4_state->base.plane = plane;
}

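/* Appends one 32-bit word to the plane's software dlist, doubling the
 * backing allocation (starting from four entries) whenever it fills up.
 */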
static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
	if (vc4_state->dlist_count == vc4_state->dlist_size) {
		u32 new_size = max(4u, vc4_state->dlist_count * 2);
		u32 *new_dlist = kmalloc_array(new_size, 4, GFP_KERNEL);

		if (!new_dlist)
			return;
		memcpy(new_dlist, vc4_state->dlist, vc4_state->dlist_count * 4);

		kfree(vc4_state->dlist);
		vc4_state->dlist = new_dlist;
		vc4_state->dlist_size = new_size;
	}

	vc4_state->dlist[vc4_state->dlist_count++] = val;
}

/* Returns the scl0/scl1 field based on whether the dimensions need to
 * be up/down/non-scaled.
 *
 * This is a replication of a table from the spec.
 */
static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	switch (vc4_state->x_scaling[plane] << 2 | vc4_state->y_scaling[plane]) {
	case VC4_SCALING_PPF << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_PPF_V_PPF;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_TPZ_V_PPF;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_PPF_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_TPZ_V_TPZ;
	case VC4_SCALING_PPF << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_PPF_V_NONE;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_PPF:
		return SCALER_CTL0_SCL_H_NONE_V_PPF;
	case VC4_SCALING_NONE << 2 | VC4_SCALING_TPZ:
		return SCALER_CTL0_SCL_H_NONE_V_TPZ;
	case VC4_SCALING_TPZ << 2 | VC4_SCALING_NONE:
		return SCALER_CTL0_SCL_H_TPZ_V_NONE;
	default:
	case VC4_SCALING_NONE << 2 | VC4_SCALING_NONE:
		/* The unity case is independently handled by
		 * SCALER_CTL0_UNITY.
		 */
		return 0;
	}
}

static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	u32 subpixel_src_mask = (1 << 16) - 1;
	u32 format = fb->format->format;
	int num_planes = fb->format->num_planes;
	u32 h_subsample = 1;
	u32 v_subsample = 1;
	int i;

	for (i = 0; i < num_planes; i++)
		vc4_state->offsets[i] = bo->paddr + fb->offsets[i];

	/* We don't support subpixel source positioning for scaling. */
	if ((state->src_x & subpixel_src_mask) ||
	    (state->src_y & subpixel_src_mask) ||
	    (state->src_w & subpixel_src_mask) ||
	    (state->src_h & subpixel_src_mask)) {
		return -EINVAL;
	}

	vc4_state->src_x = state->src_x >> 16;
	vc4_state->src_y = state->src_y >> 16;
	vc4_state->src_w[0] = state->src_w >> 16;
	vc4_state->src_h[0] = state->src_h >> 16;

	vc4_state->crtc_x = state->crtc_x;
	vc4_state->crtc_y = state->crtc_y;
	vc4_state->crtc_w = state->crtc_w;
	vc4_state->crtc_h = state->crtc_h;

	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
						       vc4_state->crtc_w);
	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
						       vc4_state->crtc_h);

	if (num_planes > 1) {
		vc4_state->is_yuv = true;

		h_subsample = drm_format_horz_chroma_subsampling(format);
		v_subsample = drm_format_vert_chroma_subsampling(format);
		vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
		vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;

		vc4_state->x_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_w[1],
					     vc4_state->crtc_w);
		vc4_state->y_scaling[1] =
			vc4_get_scaling_mode(vc4_state->src_h[1],
					     vc4_state->crtc_h);

		/* YUV conversion requires that scaling be enabled,
		 * even on a plane that's otherwise 1:1. Choose TPZ
		 * for simplicity.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
			vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
		if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
			vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
	}

	vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
			       vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
			       vc4_state->y_scaling[1] == VC4_SCALING_NONE);

	/* No configuring scaling on the cursor plane, since it gets
	 * non-vblank-synced updates, and scaling requires LBM changes
	 * which have to be vblank-synced.
	 */
	if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
		return -EINVAL;

	/* Clamp the on-screen start x/y to 0. The hardware doesn't
	 * support negative y, and negative x wastes bandwidth.
	 */
	if (vc4_state->crtc_x < 0) {
		for (i = 0; i < num_planes; i++) {
			u32 cpp = fb->format->cpp[i];
			u32 subs = ((i == 0) ? 1 : h_subsample);

			vc4_state->offsets[i] += (cpp *
						  (-vc4_state->crtc_x) / subs);
		}
		vc4_state->src_w[0] += vc4_state->crtc_x;
		vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
		vc4_state->crtc_x = 0;
	}

	if (vc4_state->crtc_y < 0) {
		for (i = 0; i < num_planes; i++) {
			u32 subs = ((i == 0) ? 1 : v_subsample);

			vc4_state->offsets[i] += (fb->pitches[i] *
						  (-vc4_state->crtc_y) / subs);
		}
		vc4_state->src_h[0] += vc4_state->crtc_y;
		vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
		vc4_state->crtc_y = 0;
	}

	return 0;
}

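/* Both TPZ and PPF encode the src/dst ratio as a 16.16 fixed-point
 * scale factor. For example, scaling a 1920-pixel-wide source down
 * to 960 pixels gives scale = (1 << 16) * 1920 / 960 = 0x20000 (2.0),
 * and the TPZ reciprocal below becomes ~0 / 0x20000 = 0x7fff.
 */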
static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale, recip;

	scale = (1 << 16) * src / dst;

	/* The specs note that while the reciprocal would be defined
	 * as (1<<32)/scale, ~0 is close enough.
	 */
	recip = ~0 / scale;

	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(scale, SCALER_TPZ0_SCALE) |
			VC4_SET_FIELD(0, SCALER_TPZ0_IPHASE));
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP));
}

static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
{
	u32 scale = (1 << 16) * src / dst;

	vc4_dlist_write(vc4_state,
			SCALER_PPF_AGC |
			VC4_SET_FIELD(scale, SCALER_PPF_SCALE) |
			VC4_SET_FIELD(0, SCALER_PPF_IPHASE));
}

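/* Returns the worst-case LBM allocation this plane will need for its
 * current scaling setup, or 0 when an RGB plane needs no scaling.
 */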
static u32 vc4_lbm_size(struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	/* This is the worst case number. One of the two sizes will
	 * be used depending on the scaling configuration.
	 */
	u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
	u32 lbm;

	if (!vc4_state->is_yuv) {
		if (vc4_state->is_unity)
			return 0;
		else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
			lbm = pix_per_line * 8;
		else {
			/* In special cases, this multiplier might be 12. */
			lbm = pix_per_line * 16;
		}
	} else {
		/* There are cases for this going down to a multiplier
		 * of 2, but according to the firmware source, the
		 * table in the docs is somewhat wrong.
		 */
		lbm = pix_per_line * 16;
	}

	lbm = roundup(lbm, 32);

	return lbm;
}

static void vc4_write_scaling_parameters(struct drm_plane_state *state,
					 int channel)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	/* Ch0 H-PPF Word 0: Scaling Parameters */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) {
		vc4_write_ppf(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}

	/* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */
	if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_w[channel], vc4_state->crtc_w);
	}

	/* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */
	if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) {
		vc4_write_tpz(vc4_state,
			      vc4_state->src_h[channel], vc4_state->crtc_h);
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);
	}
}

/* Writes out a full display list for an active plane to the plane's
 * private dlist state.
 */
static int vc4_plane_mode_set(struct drm_plane *plane,
			      struct drm_plane_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(plane->dev);
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	u32 ctl0_offset = vc4_state->dlist_count;
	const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
	int num_planes = drm_format_num_planes(format->drm);
	bool mix_plane_alpha;
	bool covers_screen;
	u32 scl0, scl1, pitch0;
	u32 lbm_size, tiling;
	unsigned long irqflags;
	int ret, i;

	ret = vc4_plane_setup_clipping_and_scaling(state);
	if (ret)
		return ret;

	/* Allocate the LBM memory that the HVS will use for temporary
	 * storage due to our scaling/format conversion.
	 */
	lbm_size = vc4_lbm_size(state);
	if (lbm_size) {
		if (!vc4_state->lbm.allocated) {
			spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
			ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
							 &vc4_state->lbm,
							 lbm_size, 32, 0, 0);
			spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
		} else {
			WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
		}
	}

	if (ret)
		return ret;

	/* SCL1 is used for Cb/Cr scaling of planar formats. For RGB
	 * and 4:4:4, scl1 should be set to scl0 so both channels of
	 * the scaler do the same thing. For YUV, the Y plane needs
	 * to be put in channel 1 and Cb/Cr in channel 0, so we swap
	 * the scl fields here.
	 */
	if (num_planes == 1) {
		scl0 = vc4_get_scl_field(state, 0);
		scl1 = scl0;
	} else {
		scl0 = vc4_get_scl_field(state, 1);
		scl1 = vc4_get_scl_field(state, 0);
	}

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		tiling = SCALER_CTL0_TILING_LINEAR;
		pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
		break;

	case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
		/* For T-tiled, the FB pitch is "how many bytes from
		 * one row to the next, such that pitch * tile_h ==
		 * tile_size * tiles_per_row."
		 */
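		/* For example, a 1920-pixel-wide, 32bpp buffer has 7680
		 * bytes per line; each 4kb tile covers 128 bytes of a
		 * line across its 32 rows, so tiles_w below works out
		 * to 7680 >> 7 = 60 tiles per row.
		 */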
		u32 tile_size_shift = 12; /* T tiles are 4kb */
		u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
		u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);

		tiling = SCALER_CTL0_TILING_256B_OR_T;

		pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
			  VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
			  VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
		break;
	}

	default:
		DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
			      (long long)fb->modifier);
		return -EINVAL;
	}

	/* Control word */
	vc4_dlist_write(vc4_state,
			SCALER_CTL0_VALID |
			(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
			(format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
			VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
			(vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
			VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
			VC4_SET_FIELD(scl1, SCALER_CTL0_SCL1));

	/* Position Word 0: Image Positions and Alpha Value */
	vc4_state->pos0_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(state->alpha >> 8, SCALER_POS0_FIXED_ALPHA) |
			VC4_SET_FIELD(vc4_state->crtc_x, SCALER_POS0_START_X) |
			VC4_SET_FIELD(vc4_state->crtc_y, SCALER_POS0_START_Y));

	/* Position Word 1: Scaled Image Dimensions. */
	if (!vc4_state->is_unity) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(vc4_state->crtc_w,
					      SCALER_POS1_SCL_WIDTH) |
				VC4_SET_FIELD(vc4_state->crtc_h,
					      SCALER_POS1_SCL_HEIGHT));
	}

	/* Don't waste cycles mixing with plane alpha if the set alpha
	 * is opaque or there is no per-pixel alpha information.
	 * In any case we use the alpha property value as the fixed alpha.
	 */
	mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
			  fb->format->has_alpha;

	/* Position Word 2: Source Image Size, Alpha */
	vc4_state->pos2_offset = vc4_state->dlist_count;
	vc4_dlist_write(vc4_state,
			VC4_SET_FIELD(fb->format->has_alpha ?
				      SCALER_POS2_ALPHA_MODE_PIPELINE :
				      SCALER_POS2_ALPHA_MODE_FIXED,
				      SCALER_POS2_ALPHA_MODE) |
			(mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) |
			(fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
			VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
			VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));

	/* Position Word 3: Context. Written by the HVS. */
	vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pointer Word 0/1/2: RGB / Y / Cb / Cr Pointers
	 *
	 * The pointers may be any byte address.
	 */
	vc4_state->ptr0_offset = vc4_state->dlist_count;
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, vc4_state->offsets[i]);

	/* Pointer Context Word 0/1/2: Written by the HVS */
	for (i = 0; i < num_planes; i++)
		vc4_dlist_write(vc4_state, 0xc0c0c0c0);

	/* Pitch word 0 */
	vc4_dlist_write(vc4_state, pitch0);

	/* Pitch word 1/2 */
	for (i = 1; i < num_planes; i++) {
		vc4_dlist_write(vc4_state,
				VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
	}

	/* Colorspace conversion words */
	if (vc4_state->is_yuv) {
		vc4_dlist_write(vc4_state, SCALER_CSC0_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC1_ITR_R_601_5);
		vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
	}

	if (!vc4_state->is_unity) {
		/* LBM Base Address. */
		if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
		    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
			vc4_dlist_write(vc4_state, vc4_state->lbm.start);
		}

		if (num_planes > 1) {
			/* Emit Cb/Cr as channel 0 and Y as channel
			 * 1. This matches how we set up scl0/scl1
			 * above.
			 */
			vc4_write_scaling_parameters(state, 1);
		}
		vc4_write_scaling_parameters(state, 0);

		/* If any PPF setup was done, then all the kernel
		 * pointers get uploaded.
		 */
		if (vc4_state->x_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[0] == VC4_SCALING_PPF ||
		    vc4_state->x_scaling[1] == VC4_SCALING_PPF ||
		    vc4_state->y_scaling[1] == VC4_SCALING_PPF) {
			u32 kernel = VC4_SET_FIELD(vc4->hvs->mitchell_netravali_filter.start,
						   SCALER_PPF_KERNEL_OFFSET);

			/* HPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 0 */
			vc4_dlist_write(vc4_state, kernel);
			/* HPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
			/* VPPF plane 1 */
			vc4_dlist_write(vc4_state, kernel);
		}
	}

	vc4_state->dlist[ctl0_offset] |=
		VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);

	/* crtc_* are already clipped coordinates. */
	covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
			vc4_state->crtc_w == state->crtc->mode.hdisplay &&
			vc4_state->crtc_h == state->crtc->mode.vdisplay;
	/* Background fill might be necessary when the plane has per-pixel
	 * alpha content or a non-opaque plane alpha and could blend from the
	 * background or does not cover the entire screen.
	 */
	vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
				   state->alpha != DRM_BLEND_ALPHA_OPAQUE;

	return 0;
}

/* If a modeset involves changing the setup of a plane, the atomic
 * infrastructure will call this to validate a proposed plane setup.
 * However, if a plane isn't getting updated, this (and the
 * corresponding vc4_plane_atomic_update) won't get called. Thus, we
 * compute the dlist here and have all active plane dlists get updated
 * in the CRTC's flush.
 */
static int vc4_plane_atomic_check(struct drm_plane *plane,
				  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);

	vc4_state->dlist_count = 0;

	if (plane_enabled(state))
		return vc4_plane_mode_set(plane, state);
	else
		return 0;
}

static void vc4_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	/* No contents here. Since we don't know where in the CRTC's
	 * dlist we should be stored, our dlist is uploaded to the
	 * hardware with vc4_plane_write_dlist() at CRTC atomic_flush
	 * time.
	 */
}

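/* Copies the plane's current software dlist into the HVS dlist slot
 * that the CRTC allocated for it; returns the number of words written.
 */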
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	int i;

	vc4_state->hw_dlist = dlist;

	/* Can't memcpy_toio() because it needs to be 32-bit writes. */
	for (i = 0; i < vc4_state->dlist_count; i++)
		writel(vc4_state->dlist[i], &dlist[i]);

	return vc4_state->dlist_count;
}

u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
{
	const struct vc4_plane_state *vc4_state =
		container_of(state, typeof(*vc4_state), base);

	return vc4_state->dlist_count;
}

/* Updates the plane to immediately (well, once the FIFO needs
 * refilling) scan out from a new framebuffer.
 */
void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
	struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
	uint32_t addr;

	/* We're skipping the address adjustment for negative origin,
	 * because this is only called on the primary plane.
	 */
	WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
	addr = bo->paddr + fb->offsets[0];

	/* Write the new address into the hardware immediately. The
	 * scanout will start from this address as soon as the FIFO
	 * needs to refill with pixels.
	 */
	writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]);

	/* Also update the CPU-side dlist copy, so that any later
	 * atomic updates that don't do a new modeset on our plane
	 * also use our updated address.
	 */
	vc4_state->dlist[vc4_state->ptr0_offset] = addr;
}

static void vc4_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_plane_state *state)
{
	struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);

	if (plane->state->fb != state->fb) {
		vc4_plane_async_set_fb(plane, state->fb);
		drm_atomic_set_fb_for_plane(plane->state, state->fb);
	}

	/* Set the cursor's position on the screen. This is the
	 * expected change from the drm_mode_cursor_universal()
	 * helper.
	 */
	plane->state->crtc_x = state->crtc_x;
	plane->state->crtc_y = state->crtc_y;

	/* Allow changing the start position within the cursor BO, if
	 * that matters.
	 */
	plane->state->src_x = state->src_x;
	plane->state->src_y = state->src_y;

	/* Update the display list based on the new crtc_x/y. */
	vc4_plane_atomic_check(plane, plane->state);

	/* Note that we can't just call vc4_plane_write_dlist()
	 * because that would smash the context data that the HVS is
	 * currently using.
	 */
	writel(vc4_state->dlist[vc4_state->pos0_offset],
	       &vc4_state->hw_dlist[vc4_state->pos0_offset]);
	writel(vc4_state->dlist[vc4_state->pos2_offset],
	       &vc4_state->hw_dlist[vc4_state->pos2_offset]);
	writel(vc4_state->dlist[vc4_state->ptr0_offset],
	       &vc4_state->hw_dlist[vc4_state->ptr0_offset]);
}

static int vc4_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	/* No configuring new scaling in the fast path. */
	if (plane->state->crtc_w != state->crtc_w ||
	    plane->state->crtc_h != state->crtc_h ||
	    plane->state->src_w != state->src_w ||
	    plane->state->src_h != state->src_h)
		return -EINVAL;

	return 0;
}

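/* Takes a use-count reference on the new framebuffer's BO and attaches
 * its exclusive fence so the commit waits for any pending rendering.
 */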
static int vc4_prepare_fb(struct drm_plane *plane,
			  struct drm_plane_state *state)
{
	struct vc4_bo *bo;
	struct dma_fence *fence;
	int ret;

	if ((plane->state->fb == state->fb) || !state->fb)
		return 0;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);

	ret = vc4_bo_inc_usecnt(bo);
	if (ret)
		return ret;

	fence = reservation_object_get_excl_rcu(bo->resv);
	drm_atomic_set_fence_for_plane(state, fence);

	return 0;
}

static void vc4_cleanup_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vc4_bo *bo;

	if (plane->state->fb == state->fb || !state->fb)
		return;

	bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
	vc4_bo_dec_usecnt(bo);
}

static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
	.atomic_check = vc4_plane_atomic_check,
	.atomic_update = vc4_plane_atomic_update,
	.prepare_fb = vc4_prepare_fb,
	.cleanup_fb = vc4_cleanup_fb,
	.atomic_async_check = vc4_plane_atomic_async_check,
	.atomic_async_update = vc4_plane_atomic_async_update,
};

static void vc4_plane_destroy(struct drm_plane *plane)
{
	drm_plane_helper_disable(plane);
	drm_plane_cleanup(plane);
}

static bool vc4_format_mod_supported(struct drm_plane *plane,
				     uint32_t format,
				     uint64_t modifier)
{
	/* Support T_TILING for RGB formats only. */
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_XRGB1555:
		return true;
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV16:
	default:
		return (modifier == DRM_FORMAT_MOD_LINEAR);
	}
}

static const struct drm_plane_funcs vc4_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vc4_plane_destroy,
	.set_property = NULL,
	.reset = vc4_plane_reset,
	.atomic_duplicate_state = vc4_plane_duplicate_state,
	.atomic_destroy_state = vc4_plane_destroy_state,
	.format_mod_supported = vc4_format_mod_supported,
};

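/* Allocates and registers one plane of the given type, exposing the
 * HVS-supported formats, except that cursor planes omit the YUV
 * formats since those would need the scaler.
 */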
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct vc4_plane *vc4_plane;
	u32 formats[ARRAY_SIZE(hvs_formats)];
	u32 num_formats = 0;
	int ret = 0;
	unsigned i;
	static const uint64_t modifiers[] = {
		DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_INVALID
	};

	vc4_plane = devm_kzalloc(dev->dev, sizeof(*vc4_plane),
				 GFP_KERNEL);
	if (!vc4_plane)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
		/* Don't allow YUV in cursor planes, since that means
		 * turning on the scaler, which we don't allow for the
		 * cursor.
		 */
		if (type != DRM_PLANE_TYPE_CURSOR ||
		    hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
			formats[num_formats++] = hvs_formats[i].drm;
		}
	}
	plane = &vc4_plane->base;
	ret = drm_universal_plane_init(dev, plane, 0,
				       &vc4_plane_funcs,
				       formats, num_formats,
				       modifiers, type, NULL);

	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

	drm_plane_create_alpha_property(plane);

	return plane;
}