// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP plane manipulation routines.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>

#include "malidp_hw.h"
#include "malidp_drv.h"

/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT		0x000
#define   LAYER_FORMAT_MASK		0x3f
#define MALIDP_LAYER_CONTROL		0x004
#define   LAYER_ENABLE			(1 << 0)
#define   LAYER_FLOWCFG_MASK		7
#define   LAYER_FLOWCFG(x)		(((x) & LAYER_FLOWCFG_MASK) << 1)
#define   LAYER_FLOWCFG_SCALE_SE	3
#define   LAYER_ROT_OFFSET		8
#define   LAYER_H_FLIP			(1 << 10)
#define   LAYER_V_FLIP			(1 << 11)
#define   LAYER_ROT_MASK		(0xf << 8)
#define   LAYER_COMP_MASK		(0x3 << 12)
#define   LAYER_COMP_PIXEL		(0x3 << 12)
#define   LAYER_COMP_PLANE		(0x2 << 12)
#define   LAYER_PMUL_ENABLE		(0x1 << 14)
#define   LAYER_ALPHA_OFFSET		(16)
#define   LAYER_ALPHA_MASK		(0xff)
#define   LAYER_ALPHA(x)		(((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE		0x008
#define MALIDP_LAYER_SIZE		0x00c
#define   LAYER_H_VAL(x)		(((x) & 0x1fff) << 0)
#define   LAYER_V_VAL(x)		(((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE		0x010
#define MALIDP_LAYER_OFFSET		0x014
#define MALIDP550_LS_ENABLE		0x01c
#define MALIDP550_LS_R1_IN_SIZE		0x020

#define MODIFIERS_COUNT_MAX		15

/*
 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
 * for formats with 1- or 2-bit alpha channels.
 * We set it to give 100%/0% opacity for 1-bit formats and 100%/66%/33%/0%
 * opacity for 2-bit formats.
 */
#define MALIDP_ALPHA_LUT		0xffaa5500

/* page sizes the MMU prefetcher can support */
#define MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES	(SZ_4K | SZ_64K)
#define MALIDP_MMU_PREFETCH_FULL_PGSIZES	(SZ_1M | SZ_2M)

/* readahead for partial-frame prefetch */
#define MALIDP_MMU_PREFETCH_READAHEAD		8

static void malidp_de_plane_destroy(struct drm_plane *plane)
{
	struct malidp_plane *mp = to_malidp_plane(plane);

	drm_plane_cleanup(plane);
	kfree(mp);
}

/*
 * Replicate what the default ->reset hook does: free the state pointer and
 * allocate a new empty object. We just need enough space to store
 * a malidp_plane_state instead of a drm_plane_state.
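 *
 * Note that __drm_atomic_helper_plane_reset() links the new state back to
 * the plane and fills in the generic defaults (DRM_MODE_ROTATE_0, opaque
 * alpha, and so on), so only the malidp-specific fields rely on being
 * zeroed by kzalloc() here.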
 */
static void malidp_plane_reset(struct drm_plane *plane)
{
	struct malidp_plane_state *state = to_malidp_plane_state(plane->state);

	if (state)
		__drm_atomic_helper_plane_destroy_state(&state->base);
	kfree(state);
	plane->state = NULL;
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_plane_reset(plane, &state->base);
}

static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
	struct malidp_plane_state *state, *m_state;

	if (!plane->state)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	m_state = to_malidp_plane_state(plane->state);
	__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
	state->rotmem_size = m_state->rotmem_size;
	state->format = m_state->format;
	state->n_planes = m_state->n_planes;

	state->mmu_prefetch_mode = m_state->mmu_prefetch_mode;
	state->mmu_prefetch_pgsize = m_state->mmu_prefetch_pgsize;

	return &state->base;
}

static void malidp_destroy_plane_state(struct drm_plane *plane,
				       struct drm_plane_state *state)
{
	struct malidp_plane_state *m_state = to_malidp_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);
	kfree(m_state);
}

static const char * const prefetch_mode_names[] = {
	[MALIDP_PREFETCH_MODE_NONE] = "MMU_PREFETCH_NONE",
	[MALIDP_PREFETCH_MODE_PARTIAL] = "MMU_PREFETCH_PARTIAL",
	[MALIDP_PREFETCH_MODE_FULL] = "MMU_PREFETCH_FULL",
};

static void malidp_plane_atomic_print_state(struct drm_printer *p,
					    const struct drm_plane_state *state)
{
	struct malidp_plane_state *ms = to_malidp_plane_state(state);

	drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
	drm_printf(p, "\tformat_id=%u\n", ms->format);
	drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
	drm_printf(p, "\tmmu_prefetch_mode=%s\n",
		   prefetch_mode_names[ms->mmu_prefetch_mode]);
	drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}

bool malidp_format_mod_supported(struct drm_device *drm,
				 u32 format, u64 modifier)
{
	const struct drm_format_info *info;
	const u64 *modifiers;
	struct malidp_drm *malidp = drm->dev_private;
	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;

	if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
		return false;

	/* Some pixel formats are supported without any modifier */
	if (modifier == DRM_FORMAT_MOD_LINEAR) {
		/*
		 * However these pixel formats need to be supported with
		 * modifiers only
		 */
		return !malidp_hw_format_is_afbc_only(format);
	}

	if (!fourcc_mod_is_vendor(modifier, ARM)) {
		DRM_ERROR("Unknown modifier (not Arm)\n");
		return false;
	}

	if (modifier &
	    ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
		DRM_DEBUG_KMS("Unsupported modifiers\n");
		return false;
	}

	modifiers = malidp_format_modifiers;

	/* SPLIT buffers must use SPARSE layout */
	if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
		return false;

	/* CBR only applies to YUV formats, where YTR should be always 0 */
	if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
		return false;

	while (*modifiers != DRM_FORMAT_MOD_INVALID) {
		if (*modifiers == modifier)
			break;

		modifiers++;
	}

	/* return false if the modifier was not found */
	if (*modifiers == DRM_FORMAT_MOD_INVALID) {
		DRM_DEBUG_KMS("Unsupported modifier\n");
		return false;
	}

	info = drm_format_info(format);

	if (info->num_planes != 1) {
		DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
		return false;
	}

	if (malidp_hw_format_is_linear_only(format)) {
		DRM_DEBUG_KMS("Given format (0x%x) is supported in linear mode only\n",
			      format);
		return false;
	}

	/*
	 * RGB formats need to provide YTR modifier and YUV formats should not
	 * provide YTR modifier.
	 */
	if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
		DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
			      info->is_yuv ? "disallowed" : "mandatory",
			      info->is_yuv ? "YUV" : "RGB");
		return false;
	}

	if (modifier & AFBC_SPLIT) {
		if (!info->is_yuv) {
			if (info->cpp[0] <= 2) {
				DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
				return false;
			}
		}

		if ((info->hsub != 1) || (info->vsub != 1)) {
			if (!(format == DRM_FORMAT_YUV420_10BIT &&
			      (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
				DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
				return false;
			}
		}
	}

	if (modifier & AFBC_CBR) {
		if ((info->hsub == 1) || (info->vsub == 1)) {
			DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
			return false;
		}
	}

	return true;
}

static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
						  u32 format, u64 modifier)
{
	return malidp_format_mod_supported(plane->dev, format, modifier);
}

static const struct drm_plane_funcs malidp_de_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = malidp_de_plane_destroy,
	.reset = malidp_plane_reset,
	.atomic_duplicate_state = malidp_duplicate_plane_state,
	.atomic_destroy_state = malidp_destroy_plane_state,
	.atomic_print_state = malidp_plane_atomic_print_state,
	.format_mod_supported = malidp_format_mod_supported_per_plane,
};

static int malidp_se_check_scaling(struct malidp_plane *mp,
				   struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_existing_crtc_state(state->state, state->crtc);
	struct malidp_crtc_state *mc;
	u32 src_w, src_h;
	int ret;

	if (!crtc_state)
		return -EINVAL;

	mc = to_malidp_crtc_state(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  0, INT_MAX, true, true);
	if (ret)
		return ret;

	if (state->rotation & MALIDP_ROTATED_MASK) {
		src_w = state->src_h >> 16;
		src_h = state->src_w >> 16;
	} else {
		src_w = state->src_w >> 16;
		src_h = state->src_h >> 16;
	}

	if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
		/* Scaling not necessary for this plane. */
		mc->scaled_planes_mask &= ~(mp->layer->id);
		return 0;
	}

	if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
		return -EINVAL;

	mc->scaled_planes_mask |= mp->layer->id;
	/*
	 * Defer scaling requirements calculation to the crtc check.
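	 * The CRTC's atomic check is expected to use scaled_planes_mask when
	 * it works out the scaling engine configuration for this commit.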
	 */
	return 0;
}

static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
	struct iommu_domain *mmu_dom;

	mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev);
	if (mmu_dom)
		return mmu_dom->pgsize_bitmap;

	return 0;
}

/*
 * Check if the framebuffer is entirely made up of pages at least pgsize in
 * size. Only a heuristic: assumes that each scatterlist entry has been aligned
 * to the largest page size smaller than its length and that the MMU maps to
 * the largest page size possible.
 */
static bool malidp_check_pages_threshold(struct malidp_plane_state *ms,
					 u32 pgsize)
{
	int i;

	for (i = 0; i < ms->n_planes; i++) {
		struct drm_gem_object *obj;
		struct drm_gem_cma_object *cma_obj;
		struct sg_table *sgt;
		struct scatterlist *sgl;

		obj = drm_gem_fb_get_obj(ms->base.fb, i);
		cma_obj = to_drm_gem_cma_obj(obj);

		if (cma_obj->sgt)
			sgt = cma_obj->sgt;
		else
			sgt = obj->funcs->get_sg_table(obj);

		if (!sgt)
			return false;

		sgl = sgt->sgl;

		while (sgl) {
			if (sgl->length < pgsize) {
				if (!cma_obj->sgt)
					kfree(sgt);
				return false;
			}

			sgl = sg_next(sgl);
		}
		if (!cma_obj->sgt)
			kfree(sgt);
	}

	return true;
}

/*
 * Check if it is possible to enable partial-frame MMU prefetch given the
 * current format, AFBC state and rotation.
 */
static bool malidp_partial_prefetch_supported(u32 format, u64 modifier,
					      unsigned int rotation)
{
	bool afbc, sparse;

	/* rotation and horizontal flip not supported for partial prefetch */
	if (rotation & (DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
			DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X))
		return false;

	afbc = modifier & DRM_FORMAT_MOD_ARM_AFBC(0);
	sparse = modifier & AFBC_FORMAT_MOD_SPARSE;

	switch (format) {
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_RGB565:
		/* always supported */
		return true;

	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_BGR565:
		/* supported, but if AFBC then must be sparse mode */
		return (!afbc) || (afbc && sparse);

	case DRM_FORMAT_BGR888:
		/* supported, but not for AFBC */
		return !afbc;

	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_YUV420:
		/* not supported */
		return false;

	default:
		return false;
	}
}

/*
 * Select the preferred MMU prefetch mode. Full-frame prefetch is preferred as
 * long as the framebuffer is made up entirely of large pages. Otherwise
 * partial-frame prefetch is selected as long as it is supported for the
 * current format. The selected page size for prefetch is returned in
 * pgsize_bitmap.
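 *
 * For example, with an incoming pgsize_bitmap of SZ_4K | SZ_2M, full-frame
 * prefetch with 2M pages is tried first; if the framebuffer fails the
 * page-size threshold check, the code falls back to partial-frame prefetch
 * with 4K pages when the format/modifier/rotation combination allows it,
 * and otherwise disables prefetch altogether.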
 */
static enum mmu_prefetch_mode malidp_mmu_prefetch_select_mode
		(struct malidp_plane_state *ms, u32 *pgsize_bitmap)
{
	u32 pgsizes;

	/* get the full-frame prefetch page size(s) supported by the MMU */
	pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_FULL_PGSIZES;

	while (pgsizes) {
		u32 largest_pgsize = 1 << __fls(pgsizes);

		if (malidp_check_pages_threshold(ms, largest_pgsize)) {
			*pgsize_bitmap = largest_pgsize;
			return MALIDP_PREFETCH_MODE_FULL;
		}

		pgsizes -= largest_pgsize;
	}

	/* get the partial-frame prefetch page size(s) supported by the MMU */
	pgsizes = *pgsize_bitmap & MALIDP_MMU_PREFETCH_PARTIAL_PGSIZES;

	if (malidp_partial_prefetch_supported(ms->base.fb->format->format,
					      ms->base.fb->modifier,
					      ms->base.rotation)) {
		/* partial prefetch using the smallest page size */
		*pgsize_bitmap = 1 << __ffs(pgsizes);
		return MALIDP_PREFETCH_MODE_PARTIAL;
	}
	*pgsize_bitmap = 0;
	return MALIDP_PREFETCH_MODE_NONE;
}

static u32 malidp_calc_mmu_control_value(enum mmu_prefetch_mode mode,
					 u8 readahead, u8 n_planes, u32 pgsize)
{
	u32 mmu_ctrl = 0;

	if (mode != MALIDP_PREFETCH_MODE_NONE) {
		mmu_ctrl |= MALIDP_MMU_CTRL_EN;

		if (mode == MALIDP_PREFETCH_MODE_PARTIAL) {
			mmu_ctrl |= MALIDP_MMU_CTRL_MODE;
			mmu_ctrl |= MALIDP_MMU_CTRL_PP_NUM_REQ(readahead);
		}

		if (pgsize == SZ_64K || pgsize == SZ_2M) {
			int i;

			for (i = 0; i < n_planes; i++)
				mmu_ctrl |= MALIDP_MMU_CTRL_PX_PS(i);
		}
	}

	return mmu_ctrl;
}

static void malidp_de_prefetch_settings(struct malidp_plane *mp,
					struct malidp_plane_state *ms)
{
	if (!mp->layer->mmu_ctrl_offset)
		return;

	/* get the page sizes supported by the MMU */
	ms->mmu_prefetch_pgsize = malidp_get_pgsize_bitmap(mp);
	ms->mmu_prefetch_mode =
		malidp_mmu_prefetch_select_mode(ms, &ms->mmu_prefetch_pgsize);
}

static int malidp_de_plane_check(struct drm_plane *plane,
				 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct malidp_plane *mp = to_malidp_plane(plane);
	struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state);
	bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK;
	struct drm_framebuffer *fb;
	u16 pixel_alpha = new_plane_state->pixel_blend_mode;
	int i, ret;
	unsigned int block_w, block_h;

	if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb))
		return 0;

	fb = new_plane_state->fb;

	ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
					     mp->layer->id, fb->format->format,
					     !!fb->modifier);
	if (ms->format == MALIDP_INVALID_FORMAT_ID)
		return -EINVAL;

	ms->n_planes = fb->format->num_planes;
	for (i = 0; i < ms->n_planes; i++) {
		u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);

		if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
				& (alignment - 1)) && !(fb->modifier)) {
			DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
				      fb->pitches[i], i);
			return -EINVAL;
		}
	}

	block_w = drm_format_info_block_width(fb->format, 0);
	block_h = drm_format_info_block_height(fb->format, 0);
	if (fb->width % block_w || fb->height % block_h) {
		DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
		return -EINVAL;
	}
	if ((new_plane_state->src_x >> 16) % block_w ||
	    (new_plane_state->src_y >> 16) % block_h) {
		DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
		return -EINVAL;
	}

	if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) ||
	    (new_plane_state->crtc_h > mp->hwdev->max_line_size) ||
	    (new_plane_state->crtc_w < mp->hwdev->min_line_size) ||
	    (new_plane_state->crtc_h < mp->hwdev->min_line_size))
		return -EINVAL;

	/*
	 * DP550/650 video layers can accept 3 plane formats only if
	 * fb->pitches[1] == fb->pitches[2] since they don't have a
	 * third plane stride register.
	 */
	if (ms->n_planes == 3 &&
	    !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
	    (new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2]))
		return -EINVAL;

	ret = malidp_se_check_scaling(mp, new_plane_state);
	if (ret)
		return ret;

	/* validate the rotation constraints for each layer */
	if (new_plane_state->rotation != DRM_MODE_ROTATE_0) {
		if (mp->layer->rot == ROTATE_NONE)
			return -EINVAL;
		if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
			return -EINVAL;
		/*
		 * packed RGB888 / BGR888 can't be rotated or flipped
		 * unless they are stored in a compressed way
		 */
		if ((fb->format->format == DRM_FORMAT_RGB888 ||
		     fb->format->format == DRM_FORMAT_BGR888) && !(fb->modifier))
			return -EINVAL;
	}

	/* SMART layer does not support AFBC */
	if (mp->layer->id == DE_SMART && fb->modifier) {
		DRM_ERROR("AFBC framebuffer not supported in SMART layer");
		return -EINVAL;
	}

	ms->rotmem_size = 0;
	if (new_plane_state->rotation & MALIDP_ROTATED_MASK) {
		int val;

		val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w,
						     new_plane_state->crtc_h,
						     fb->format->format,
						     !!(fb->modifier));
		if (val < 0)
			return val;

		ms->rotmem_size = val;
	}

	/* HW can't support plane + pixel blending */
	if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
	    (pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
	    fb->format->has_alpha)
		return -EINVAL;

	malidp_de_prefetch_settings(mp, ms);

	return 0;
}

static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
					int num_planes, unsigned int pitches[3])
{
	int i;
	int num_strides = num_planes;

	if (!mp->layer->stride_offset)
		return;

	if (num_planes == 3)
		num_strides = (mp->hwdev->hw->features &
			       MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;

	/*
	 * The drm convention for pitch is that it needs to cover width * cpp,
	 * but our hardware wants the pitch/stride to cover all rows included
	 * in a tile.
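	 * For linear formats the block height is 1 and the DRM pitch is
	 * written unchanged; for a tiled layout such as X0L2, whose pixels
	 * are grouped in 2x2 tiles, the pitch is doubled before being
	 * programmed.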
	 */
	for (i = 0; i < num_strides; ++i) {
		unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);

		malidp_hw_write(mp->hwdev, pitches[i] * block_h,
				mp->layer->base +
				mp->layer->stride_offset + i * 4);
	}
}

static const s16
malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
	[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1192, 0, 1634,
		1192, -401, -832,
		1192, 2066, 0,
		64, 512, 512
	},
	[DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024, 0, 1436,
		1024, -352, -731,
		1024, 1815, 0,
		0, 512, 512
	},
	[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1192, 0, 1836,
		1192, -218, -546,
		1192, 2163, 0,
		64, 512, 512
	},
	[DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024, 0, 1613,
		1024, -192, -479,
		1024, 1900, 0,
		0, 512, 512
	},
	[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
		1024, 0, 1476,
		1024, -165, -572,
		1024, 1884, 0,
		0, 512, 512
	},
	[DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
		1024, 0, 1510,
		1024, -168, -585,
		1024, 1927, 0,
		0, 512, 512
	}
};

static void malidp_de_set_color_encoding(struct malidp_plane *plane,
					 enum drm_color_encoding enc,
					 enum drm_color_range range)
{
	unsigned int i;

	for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
		/* coefficients are signed, two's complement values */
		malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
				plane->layer->base + plane->layer->yuv2rgb_offset +
				i * 4);
	}
}

static void malidp_de_set_mmu_control(struct malidp_plane *mp,
				      struct malidp_plane_state *ms)
{
	u32 mmu_ctrl;

	/* check hardware supports MMU prefetch */
	if (!mp->layer->mmu_ctrl_offset)
		return;

	mmu_ctrl = malidp_calc_mmu_control_value(ms->mmu_prefetch_mode,
						 MALIDP_MMU_PREFETCH_READAHEAD,
						 ms->n_planes,
						 ms->mmu_prefetch_pgsize);

	malidp_hw_write(mp->hwdev, mmu_ctrl,
			mp->layer->base + mp->layer->mmu_ctrl_offset);
}

static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
				       struct malidp_plane *mp,
				       int plane_index)
{
	dma_addr_t paddr;
	u16 ptr;
	struct drm_plane *plane = &mp->base;
	bool afbc = fb->modifier ? true : false;

	ptr = mp->layer->ptr + (plane_index << 4);

	/*
	 * drm_fb_cma_get_gem_addr() alters the physical base address of the
	 * framebuffer as per the plane's src_x, src_y co-ordinates (i.e. to
	 * take care of source cropping).
	 * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
	 * and _AD_CROP_V registers.
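	 * In the linear case the address written below therefore already
	 * points at the first visible pixel, while in the AFBC case it is
	 * the start of the compressed buffer and the visible window is
	 * selected later in malidp_de_set_plane_afbc().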
	 */
	if (!afbc) {
		paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
						plane_index);
	} else {
		struct drm_gem_cma_object *obj;

		obj = drm_fb_cma_get_gem_obj(fb, plane_index);

		if (WARN_ON(!obj))
			return;
		paddr = obj->paddr;
	}

	malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
	malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
}

static void malidp_de_set_plane_afbc(struct drm_plane *plane)
{
	struct malidp_plane *mp;
	u32 src_w, src_h, val = 0, src_x, src_y;
	struct drm_framebuffer *fb = plane->state->fb;

	mp = to_malidp_plane(plane);

	/* no afbc_decoder_offset means AFBC is not supported on this plane */
	if (!mp->layer->afbc_decoder_offset)
		return;

	if (!fb->modifier) {
		malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
		return;
	}

	/* convert src values from Q16 fixed point to integer */
	src_w = plane->state->src_w >> 16;
	src_h = plane->state->src_h >> 16;
	src_x = plane->state->src_x >> 16;
	src_y = plane->state->src_y >> 16;

	val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
	      src_x;
	malidp_hw_write(mp->hwdev, val,
			mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);

	val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
	      src_y;
	malidp_hw_write(mp->hwdev, val,
			mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);

	val = MALIDP_AD_EN;
	if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
		val |= MALIDP_AD_BS;
	if (fb->modifier & AFBC_FORMAT_MOD_YTR)
		val |= MALIDP_AD_YTR;

	malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
}

static void malidp_de_plane_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct malidp_plane *mp;
	struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									    plane);
	u16 pixel_alpha = new_state->pixel_blend_mode;
	u8 plane_alpha = new_state->alpha >> 8;
	u32 src_w, src_h, dest_w, dest_h, val;
	int i;
	struct drm_framebuffer *fb = plane->state->fb;

	mp = to_malidp_plane(plane);

	/*
	 * For AFBC framebuffer, use the framebuffer width and height for
	 * configuring layer input size register.
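	 * The AFBC decoder consumes the whole compressed buffer and the
	 * visible region is then selected by the crop registers programmed
	 * in malidp_de_set_plane_afbc(), so the layer size must match the
	 * buffer dimensions rather than the src_w/src_h window.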
	 */
	if (fb->modifier) {
		src_w = fb->width;
		src_h = fb->height;
	} else {
		/* convert src values from Q16 fixed point to integer */
		src_w = new_state->src_w >> 16;
		src_h = new_state->src_h >> 16;
	}

	dest_w = new_state->crtc_w;
	dest_h = new_state->crtc_h;

	val = malidp_hw_read(mp->hwdev, mp->layer->base);
	val = (val & ~LAYER_FORMAT_MASK) | ms->format;
	malidp_hw_write(mp->hwdev, val, mp->layer->base);

	for (i = 0; i < ms->n_planes; i++)
		malidp_set_plane_base_addr(fb, mp, i);

	malidp_de_set_mmu_control(mp, ms);

	malidp_de_set_plane_pitches(mp, ms->n_planes,
				    new_state->fb->pitches);

	if ((plane->state->color_encoding != old_state->color_encoding) ||
	    (plane->state->color_range != old_state->color_range))
		malidp_de_set_color_encoding(mp, plane->state->color_encoding,
					     plane->state->color_range);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
			mp->layer->base + MALIDP_LAYER_SIZE);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
			mp->layer->base + MALIDP_LAYER_COMP_SIZE);

	malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) |
			LAYER_V_VAL(new_state->crtc_y),
			mp->layer->base + MALIDP_LAYER_OFFSET);

	if (mp->layer->id == DE_SMART) {
		/*
		 * Enable the first rectangle in the SMART layer to be
		 * able to use it as a drm plane.
		 */
		malidp_hw_write(mp->hwdev, 1,
				mp->layer->base + MALIDP550_LS_ENABLE);
		malidp_hw_write(mp->hwdev,
				LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
				mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
	}

	malidp_de_set_plane_afbc(plane);

	/* first clear the rotation bits */
	val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
	val &= ~LAYER_ROT_MASK;

	/* setup the rotation and axis flip bits */
	if (new_state->rotation & DRM_MODE_ROTATE_MASK)
		val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
		       LAYER_ROT_OFFSET;
	if (new_state->rotation & DRM_MODE_REFLECT_X)
		val |= LAYER_H_FLIP;
	if (new_state->rotation & DRM_MODE_REFLECT_Y)
		val |= LAYER_V_FLIP;

	val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));

	if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
		val |= LAYER_COMP_PLANE;
	} else if (new_state->fb->format->has_alpha) {
		/* We only care about blend mode if the format has alpha */
		switch (pixel_alpha) {
		case DRM_MODE_BLEND_PREMULTI:
			val |= LAYER_COMP_PIXEL | LAYER_PMUL_ENABLE;
			break;
		case DRM_MODE_BLEND_COVERAGE:
			val |= LAYER_COMP_PIXEL;
			break;
		}
	}
	val |= LAYER_ALPHA(plane_alpha);

	val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
	if (new_state->crtc) {
		struct malidp_crtc_state *m =
			to_malidp_crtc_state(new_state->crtc->state);

		if (m->scaler_config.scale_enable &&
		    m->scaler_config.plane_src_id == mp->layer->id)
			val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
	}

	/* set the 'enable layer' bit */
	val |= LAYER_ENABLE;

	malidp_hw_write(mp->hwdev, val,
			mp->layer->base + MALIDP_LAYER_CONTROL);
}

static void malidp_de_plane_disable(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct malidp_plane *mp = to_malidp_plane(plane);

	malidp_hw_clearbits(mp->hwdev,
			    LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
			    mp->layer->base + MALIDP_LAYER_CONTROL);
}

static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
	.atomic_check = malidp_de_plane_check,
	.atomic_update = malidp_de_plane_update,
	.atomic_disable = malidp_de_plane_disable,
};

static const uint64_t linear_only_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

int malidp_de_planes_init(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm->dev_private;
	const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
	struct malidp_plane *plane = NULL;
	enum drm_plane_type plane_type;
	unsigned long crtcs = BIT(drm->mode_config.num_crtc);
	unsigned long flags = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | DRM_MODE_ROTATE_180 |
			      DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
	unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
				  BIT(DRM_MODE_BLEND_PREMULTI) |
				  BIT(DRM_MODE_BLEND_COVERAGE);
	u32 *formats;
	int ret, i = 0, j = 0, n;
	u64 supported_modifiers[MODIFIERS_COUNT_MAX];
	const u64 *modifiers;

	modifiers = malidp_format_modifiers;

	if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
		/*
		 * The hardware does not support SPLIT, so build the list
		 * of supported modifiers excluding the SPLIT ones.
		 */
		while (*modifiers != DRM_FORMAT_MOD_INVALID) {
			if (!(*modifiers & AFBC_SPLIT))
				supported_modifiers[j++] = *modifiers;

			modifiers++;
		}
		supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
		modifiers = supported_modifiers;
	}

	formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
	if (!formats) {
		ret = -ENOMEM;
		goto cleanup;
	}

	for (i = 0; i < map->n_layers; i++) {
		u8 id = map->layers[i].id;

		plane = kzalloc(sizeof(*plane), GFP_KERNEL);
		if (!plane) {
			ret = -ENOMEM;
			goto cleanup;
		}

		/* build the list of DRM supported formats based on the map */
		for (n = 0, j = 0; j < map->n_pixel_formats; j++) {
			if ((map->pixel_formats[j].layer & id) == id)
				formats[n++] = map->pixel_formats[j].format;
		}

		plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
					DRM_PLANE_TYPE_OVERLAY;

		/*
		 * All the layers except the SMART layer support AFBC modifiers.
		 */
		ret = drm_universal_plane_init(drm, &plane->base, crtcs,
					       &malidp_de_plane_funcs, formats, n,
					       (id == DE_SMART) ? linear_only_modifiers : modifiers,
					       plane_type, NULL);

		if (ret < 0)
			goto cleanup;

		drm_plane_helper_add(&plane->base,
				     &malidp_de_plane_helper_funcs);
		plane->hwdev = malidp->dev;
		plane->layer = &map->layers[i];

		drm_plane_create_alpha_property(&plane->base);
		drm_plane_create_blend_mode_property(&plane->base, blend_caps);

		if (id == DE_SMART) {
			/*
			 * Skip the features which the SMART layer doesn't have.
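			 * The rotation property, the alpha compose LUT and
			 * the YUV->RGB colour properties set up below only
			 * apply to the other layers.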
			 */
			continue;
		}

		drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
		malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
				plane->layer->base + MALIDP_LAYER_COMPOSE);

		/* Attach the YUV->RGB property only to video layers */
		if (id & (DE_VIDEO1 | DE_VIDEO2)) {
			/* default encoding for YUV->RGB is BT601 NARROW */
			enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
			enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;

			ret = drm_plane_create_color_properties(&plane->base,
						BIT(DRM_COLOR_YCBCR_BT601) | \
						BIT(DRM_COLOR_YCBCR_BT709) | \
						BIT(DRM_COLOR_YCBCR_BT2020),
						BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
						BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						enc, range);
			if (!ret)
				/* program the HW registers */
				malidp_de_set_color_encoding(plane, enc, range);
			else
				DRM_WARN("Failed to create video layer %d color properties\n", id);
		}
	}

	kfree(formats);

	return 0;

cleanup:
	kfree(formats);

	return ret;
}