1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2015 Free Electrons 4 * Copyright (C) 2015 NextThing Co 5 * 6 * Maxime Ripard <maxime.ripard@free-electrons.com> 7 */ 8 9 #include <linux/component.h> 10 #include <linux/list.h> 11 #include <linux/module.h> 12 #include <linux/of_device.h> 13 #include <linux/of_graph.h> 14 #include <linux/platform_device.h> 15 #include <linux/reset.h> 16 17 #include <drm/drm_atomic.h> 18 #include <drm/drm_atomic_helper.h> 19 #include <drm/drm_crtc.h> 20 #include <drm/drm_fb_cma_helper.h> 21 #include <drm/drm_fourcc.h> 22 #include <drm/drm_gem_cma_helper.h> 23 #include <drm/drm_plane_helper.h> 24 #include <drm/drm_probe_helper.h> 25 26 #include "sun4i_backend.h" 27 #include "sun4i_drv.h" 28 #include "sun4i_frontend.h" 29 #include "sun4i_layer.h" 30 #include "sunxi_engine.h" 31 32 struct sun4i_backend_quirks { 33 /* backend <-> TCON muxing selection done in backend */ 34 bool needs_output_muxing; 35 36 /* alpha at the lowest z position is not always supported */ 37 bool supports_lowest_plane_alpha; 38 }; 39 40 static const u32 sunxi_rgb2yuv_coef[12] = { 41 0x00000107, 0x00000204, 0x00000064, 0x00000108, 42 0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808, 43 0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808 44 }; 45 46 static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine) 47 { 48 int i; 49 50 DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n"); 51 52 /* Set color correction */ 53 regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG, 54 SUN4I_BACKEND_OCCTL_ENABLE); 55 56 for (i = 0; i < 12; i++) 57 regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i), 58 sunxi_rgb2yuv_coef[i]); 59 } 60 61 static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine) 62 { 63 DRM_DEBUG_DRIVER("Disabling color correction\n"); 64 65 /* Disable color correction */ 66 regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG, 67 SUN4I_BACKEND_OCCTL_ENABLE, 0); 68 } 69 70 static void sun4i_backend_commit(struct sunxi_engine *engine) 71 { 72 DRM_DEBUG_DRIVER("Committing changes\n"); 73 74 regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG, 75 SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS | 76 SUN4I_BACKEND_REGBUFFCTL_LOADCTL); 77 } 78 79 void sun4i_backend_layer_enable(struct sun4i_backend *backend, 80 int layer, bool enable) 81 { 82 u32 val; 83 84 DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis", 85 layer); 86 87 if (enable) 88 val = SUN4I_BACKEND_MODCTL_LAY_EN(layer); 89 else 90 val = 0; 91 92 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG, 93 SUN4I_BACKEND_MODCTL_LAY_EN(layer), val); 94 } 95 96 static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode) 97 { 98 switch (format) { 99 case DRM_FORMAT_ARGB8888: 100 *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888; 101 break; 102 103 case DRM_FORMAT_ARGB4444: 104 *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444; 105 break; 106 107 case DRM_FORMAT_ARGB1555: 108 *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555; 109 break; 110 111 case DRM_FORMAT_RGBA5551: 112 *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551; 113 break; 114 115 case DRM_FORMAT_RGBA4444: 116 *mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444; 117 break; 118 119 case DRM_FORMAT_XRGB8888: 120 *mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888; 121 break; 122 123 case DRM_FORMAT_RGB888: 124 *mode = SUN4I_BACKEND_LAY_FBFMT_RGB888; 125 break; 126 127 case DRM_FORMAT_RGB565: 128 *mode = SUN4I_BACKEND_LAY_FBFMT_RGB565; 129 break; 130 131 default: 132 return -EINVAL; 133 } 134 135 return 0; 136 } 137 138 static const uint32_t sun4i_backend_formats[] = { 139 DRM_FORMAT_ARGB1555, 140 DRM_FORMAT_ARGB4444, 141 DRM_FORMAT_ARGB8888, 142 DRM_FORMAT_RGB565, 143 DRM_FORMAT_RGB888, 144 DRM_FORMAT_RGBA4444, 145 DRM_FORMAT_RGBA5551, 146 DRM_FORMAT_UYVY, 147 DRM_FORMAT_VYUY, 148 DRM_FORMAT_XRGB8888, 149 DRM_FORMAT_YUYV, 150 DRM_FORMAT_YVYU, 151 }; 152 153 bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier) 154 { 155 unsigned int i; 156 157 if (modifier != DRM_FORMAT_MOD_LINEAR) 158 return false; 159 160 for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++) 161 if (sun4i_backend_formats[i] == fmt) 162 return true; 163 164 return false; 165 } 166 167 int sun4i_backend_update_layer_coord(struct sun4i_backend *backend, 168 int layer, struct drm_plane *plane) 169 { 170 struct drm_plane_state *state = plane->state; 171 172 DRM_DEBUG_DRIVER("Updating layer %d\n", layer); 173 174 if (plane->type == DRM_PLANE_TYPE_PRIMARY) { 175 DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n", 176 state->crtc_w, state->crtc_h); 177 regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG, 178 SUN4I_BACKEND_DISSIZE(state->crtc_w, 179 state->crtc_h)); 180 } 181 182 /* Set height and width */ 183 DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n", 184 state->crtc_w, state->crtc_h); 185 regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer), 186 SUN4I_BACKEND_LAYSIZE(state->crtc_w, 187 state->crtc_h)); 188 189 /* Set base coordinates */ 190 DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n", 191 state->crtc_x, state->crtc_y); 192 regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer), 193 SUN4I_BACKEND_LAYCOOR(state->crtc_x, 194 state->crtc_y)); 195 196 return 0; 197 } 198 199 static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend, 200 int layer, struct drm_plane *plane) 201 { 202 struct drm_plane_state *state = plane->state; 203 struct drm_framebuffer *fb = state->fb; 204 const struct drm_format_info *format = fb->format; 205 const uint32_t fmt = format->format; 206 u32 val = SUN4I_BACKEND_IYUVCTL_EN; 207 int i; 208 209 for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++) 210 regmap_write(backend->engine.regs, 211 SUN4I_BACKEND_YGCOEF_REG(i), 212 sunxi_bt601_yuv2rgb_coef[i]); 213 214 /* 215 * We should do that only for a single plane, but the 216 * framebuffer's atomic_check has our back on this. 217 */ 218 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer), 219 SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 220 SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN); 221 222 /* TODO: Add support for the multi-planar YUV formats */ 223 if (drm_format_info_is_yuv_packed(format) && 224 drm_format_info_is_yuv_sampling_422(format)) 225 val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422; 226 else 227 DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt); 228 229 /* 230 * Allwinner seems to list the pixel sequence from right to left, while 231 * DRM lists it from left to right. 232 */ 233 switch (fmt) { 234 case DRM_FORMAT_YUYV: 235 val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY; 236 break; 237 case DRM_FORMAT_YVYU: 238 val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY; 239 break; 240 case DRM_FORMAT_UYVY: 241 val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU; 242 break; 243 case DRM_FORMAT_VYUY: 244 val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV; 245 break; 246 default: 247 DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n", 248 fmt); 249 } 250 251 regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val); 252 253 return 0; 254 } 255 256 int sun4i_backend_update_layer_formats(struct sun4i_backend *backend, 257 int layer, struct drm_plane *plane) 258 { 259 struct drm_plane_state *state = plane->state; 260 struct drm_framebuffer *fb = state->fb; 261 bool interlaced = false; 262 u32 val; 263 int ret; 264 265 /* Clear the YUV mode */ 266 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer), 267 SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0); 268 269 if (plane->state->crtc) 270 interlaced = plane->state->crtc->state->adjusted_mode.flags 271 & DRM_MODE_FLAG_INTERLACE; 272 273 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG, 274 SUN4I_BACKEND_MODCTL_ITLMOD_EN, 275 interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0); 276 277 DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n", 278 interlaced ? "on" : "off"); 279 280 val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8); 281 if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) 282 val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN; 283 regmap_update_bits(backend->engine.regs, 284 SUN4I_BACKEND_ATTCTL_REG0(layer), 285 SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK | 286 SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN, 287 val); 288 289 if (fb->format->is_yuv) 290 return sun4i_backend_update_yuv_format(backend, layer, plane); 291 292 ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val); 293 if (ret) { 294 DRM_DEBUG_DRIVER("Invalid format\n"); 295 return ret; 296 } 297 298 regmap_update_bits(backend->engine.regs, 299 SUN4I_BACKEND_ATTCTL_REG1(layer), 300 SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val); 301 302 return 0; 303 } 304 305 int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend, 306 int layer, uint32_t fmt) 307 { 308 u32 val; 309 int ret; 310 311 ret = sun4i_backend_drm_format_to_layer(fmt, &val); 312 if (ret) { 313 DRM_DEBUG_DRIVER("Invalid format\n"); 314 return ret; 315 } 316 317 regmap_update_bits(backend->engine.regs, 318 SUN4I_BACKEND_ATTCTL_REG0(layer), 319 SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN, 320 SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN); 321 322 regmap_update_bits(backend->engine.regs, 323 SUN4I_BACKEND_ATTCTL_REG1(layer), 324 SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val); 325 326 return 0; 327 } 328 329 static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend, 330 struct drm_framebuffer *fb, 331 dma_addr_t paddr) 332 { 333 /* TODO: Add support for the multi-planar YUV formats */ 334 DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr); 335 regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr); 336 337 DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8); 338 regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0), 339 fb->pitches[0] * 8); 340 341 return 0; 342 } 343 344 int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, 345 int layer, struct drm_plane *plane) 346 { 347 struct drm_plane_state *state = plane->state; 348 struct drm_framebuffer *fb = state->fb; 349 u32 lo_paddr, hi_paddr; 350 dma_addr_t paddr; 351 352 /* Set the line width */ 353 DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8); 354 regmap_write(backend->engine.regs, 355 SUN4I_BACKEND_LAYLINEWIDTH_REG(layer), 356 fb->pitches[0] * 8); 357 358 /* Get the start of the displayed memory */ 359 paddr = drm_fb_cma_get_gem_addr(fb, state, 0); 360 DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); 361 362 if (fb->format->is_yuv) 363 return sun4i_backend_update_yuv_buffer(backend, fb, paddr); 364 365 /* Write the 32 lower bits of the address (in bits) */ 366 lo_paddr = paddr << 3; 367 DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr); 368 regmap_write(backend->engine.regs, 369 SUN4I_BACKEND_LAYFB_L32ADD_REG(layer), 370 lo_paddr); 371 372 /* And the upper bits */ 373 hi_paddr = paddr >> 29; 374 DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr); 375 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG, 376 SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer), 377 SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr)); 378 379 return 0; 380 } 381 382 int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer, 383 struct drm_plane *plane) 384 { 385 struct drm_plane_state *state = plane->state; 386 struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state); 387 unsigned int priority = state->normalized_zpos; 388 unsigned int pipe = p_state->pipe; 389 390 DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n", 391 layer, priority, pipe); 392 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer), 393 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK | 394 SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK, 395 SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) | 396 SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority)); 397 398 return 0; 399 } 400 401 void sun4i_backend_cleanup_layer(struct sun4i_backend *backend, 402 int layer) 403 { 404 regmap_update_bits(backend->engine.regs, 405 SUN4I_BACKEND_ATTCTL_REG0(layer), 406 SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN | 407 SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0); 408 } 409 410 static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state) 411 { 412 u16 src_h = state->src_h >> 16; 413 u16 src_w = state->src_w >> 16; 414 415 DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n", 416 src_w, src_h, state->crtc_w, state->crtc_h); 417 418 if ((state->crtc_h != src_h) || (state->crtc_w != src_w)) 419 return true; 420 421 return false; 422 } 423 424 static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state) 425 { 426 struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane); 427 struct sun4i_backend *backend = layer->backend; 428 uint32_t format = state->fb->format->format; 429 uint64_t modifier = state->fb->modifier; 430 431 if (IS_ERR(backend->frontend)) 432 return false; 433 434 if (!sun4i_frontend_format_is_supported(format, modifier)) 435 return false; 436 437 if (!sun4i_backend_format_is_supported(format, modifier)) 438 return true; 439 440 /* 441 * TODO: The backend alone allows 2x and 4x integer scaling, including 442 * support for an alpha component (which the frontend doesn't support). 443 * Use the backend directly instead of the frontend in this case, with 444 * another test to return false. 445 */ 446 447 if (sun4i_backend_plane_uses_scaler(state)) 448 return true; 449 450 /* 451 * Here the format is supported by both the frontend and the backend 452 * and no frontend scaling is required, so use the backend directly. 453 */ 454 return false; 455 } 456 457 static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state, 458 bool *uses_frontend) 459 { 460 if (sun4i_backend_plane_uses_frontend(state)) { 461 *uses_frontend = true; 462 return true; 463 } 464 465 *uses_frontend = false; 466 467 /* Scaling is not supported without the frontend. */ 468 if (sun4i_backend_plane_uses_scaler(state)) 469 return false; 470 471 return true; 472 } 473 474 static void sun4i_backend_atomic_begin(struct sunxi_engine *engine, 475 struct drm_crtc_state *old_state) 476 { 477 u32 val; 478 479 WARN_ON(regmap_read_poll_timeout(engine->regs, 480 SUN4I_BACKEND_REGBUFFCTL_REG, 481 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL), 482 100, 50000)); 483 } 484 485 static int sun4i_backend_atomic_check(struct sunxi_engine *engine, 486 struct drm_crtc_state *crtc_state) 487 { 488 struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 }; 489 struct sun4i_backend *backend = engine_to_sun4i_backend(engine); 490 struct drm_atomic_state *state = crtc_state->state; 491 struct drm_device *drm = state->dev; 492 struct drm_plane *plane; 493 unsigned int num_planes = 0; 494 unsigned int num_alpha_planes = 0; 495 unsigned int num_frontend_planes = 0; 496 unsigned int num_alpha_planes_max = 1; 497 unsigned int num_yuv_planes = 0; 498 unsigned int current_pipe = 0; 499 unsigned int i; 500 501 DRM_DEBUG_DRIVER("Starting checking our planes\n"); 502 503 if (!crtc_state->planes_changed) 504 return 0; 505 506 drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) { 507 struct drm_plane_state *plane_state = 508 drm_atomic_get_plane_state(state, plane); 509 struct sun4i_layer_state *layer_state = 510 state_to_sun4i_layer_state(plane_state); 511 struct drm_framebuffer *fb = plane_state->fb; 512 struct drm_format_name_buf format_name; 513 514 if (!sun4i_backend_plane_is_supported(plane_state, 515 &layer_state->uses_frontend)) 516 return -EINVAL; 517 518 if (layer_state->uses_frontend) { 519 DRM_DEBUG_DRIVER("Using the frontend for plane %d\n", 520 plane->index); 521 num_frontend_planes++; 522 } else { 523 if (fb->format->is_yuv) { 524 DRM_DEBUG_DRIVER("Plane FB format is YUV\n"); 525 num_yuv_planes++; 526 } 527 } 528 529 DRM_DEBUG_DRIVER("Plane FB format is %s\n", 530 drm_get_format_name(fb->format->format, 531 &format_name)); 532 if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE)) 533 num_alpha_planes++; 534 535 DRM_DEBUG_DRIVER("Plane zpos is %d\n", 536 plane_state->normalized_zpos); 537 538 /* Sort our planes by Zpos */ 539 plane_states[plane_state->normalized_zpos] = plane_state; 540 541 num_planes++; 542 } 543 544 /* All our planes were disabled, bail out */ 545 if (!num_planes) 546 return 0; 547 548 /* 549 * The hardware is a bit unusual here. 550 * 551 * Even though it supports 4 layers, it does the composition 552 * in two separate steps. 553 * 554 * The first one is assigning a layer to one of its two 555 * pipes. If more that 1 layer is assigned to the same pipe, 556 * and if pixels overlaps, the pipe will take the pixel from 557 * the layer with the highest priority. 558 * 559 * The second step is the actual alpha blending, that takes 560 * the two pipes as input, and uses the potential alpha 561 * component to do the transparency between the two. 562 * 563 * This two-step scenario makes us unable to guarantee a 564 * robust alpha blending between the 4 layers in all 565 * situations, since this means that we need to have one layer 566 * with alpha at the lowest position of our two pipes. 567 * 568 * However, we cannot even do that on every platform, since 569 * the hardware has a bug where the lowest plane of the lowest 570 * pipe (pipe 0, priority 0), if it has any alpha, will 571 * discard the pixel data entirely and just display the pixels 572 * in the background color (black by default). 573 * 574 * This means that on the affected platforms, we effectively 575 * have only three valid configurations with alpha, all of 576 * them with the alpha being on pipe1 with the lowest 577 * position, which can be 1, 2 or 3 depending on the number of 578 * planes and their zpos. 579 */ 580 581 /* For platforms that are not affected by the issue described above. */ 582 if (backend->quirks->supports_lowest_plane_alpha) 583 num_alpha_planes_max++; 584 585 if (num_alpha_planes > num_alpha_planes_max) { 586 DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n"); 587 return -EINVAL; 588 } 589 590 /* We can't have an alpha plane at the lowest position */ 591 if (!backend->quirks->supports_lowest_plane_alpha && 592 (plane_states[0]->fb->format->has_alpha || 593 (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))) 594 return -EINVAL; 595 596 for (i = 1; i < num_planes; i++) { 597 struct drm_plane_state *p_state = plane_states[i]; 598 struct drm_framebuffer *fb = p_state->fb; 599 struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state); 600 601 /* 602 * The only alpha position is the lowest plane of the 603 * second pipe. 604 */ 605 if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE)) 606 current_pipe++; 607 608 s_state->pipe = current_pipe; 609 } 610 611 /* We can only have a single YUV plane at a time */ 612 if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) { 613 DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n"); 614 return -EINVAL; 615 } 616 617 if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) { 618 DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n"); 619 return -EINVAL; 620 } 621 622 DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n", 623 num_planes, num_alpha_planes, num_frontend_planes, 624 num_yuv_planes); 625 626 return 0; 627 } 628 629 static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine) 630 { 631 struct sun4i_backend *backend = engine_to_sun4i_backend(engine); 632 struct sun4i_frontend *frontend = backend->frontend; 633 634 if (!frontend) 635 return; 636 637 /* 638 * In a teardown scenario with the frontend involved, we have 639 * to keep the frontend enabled until the next vblank, and 640 * only then disable it. 641 * 642 * This is due to the fact that the backend will not take into 643 * account the new configuration (with the plane that used to 644 * be fed by the frontend now disabled) until we write to the 645 * commit bit and the hardware fetches the new configuration 646 * during the next vblank. 647 * 648 * So we keep the frontend around in order to prevent any 649 * visual artifacts. 650 */ 651 spin_lock(&backend->frontend_lock); 652 if (backend->frontend_teardown) { 653 sun4i_frontend_exit(frontend); 654 backend->frontend_teardown = false; 655 } 656 spin_unlock(&backend->frontend_lock); 657 }; 658 659 static int sun4i_backend_init_sat(struct device *dev) { 660 struct sun4i_backend *backend = dev_get_drvdata(dev); 661 int ret; 662 663 backend->sat_reset = devm_reset_control_get(dev, "sat"); 664 if (IS_ERR(backend->sat_reset)) { 665 dev_err(dev, "Couldn't get the SAT reset line\n"); 666 return PTR_ERR(backend->sat_reset); 667 } 668 669 ret = reset_control_deassert(backend->sat_reset); 670 if (ret) { 671 dev_err(dev, "Couldn't deassert the SAT reset line\n"); 672 return ret; 673 } 674 675 backend->sat_clk = devm_clk_get(dev, "sat"); 676 if (IS_ERR(backend->sat_clk)) { 677 dev_err(dev, "Couldn't get our SAT clock\n"); 678 ret = PTR_ERR(backend->sat_clk); 679 goto err_assert_reset; 680 } 681 682 ret = clk_prepare_enable(backend->sat_clk); 683 if (ret) { 684 dev_err(dev, "Couldn't enable the SAT clock\n"); 685 return ret; 686 } 687 688 return 0; 689 690 err_assert_reset: 691 reset_control_assert(backend->sat_reset); 692 return ret; 693 } 694 695 static int sun4i_backend_free_sat(struct device *dev) { 696 struct sun4i_backend *backend = dev_get_drvdata(dev); 697 698 clk_disable_unprepare(backend->sat_clk); 699 reset_control_assert(backend->sat_reset); 700 701 return 0; 702 } 703 704 /* 705 * The display backend can take video output from the display frontend, or 706 * the display enhancement unit on the A80, as input for one it its layers. 707 * This relationship within the display pipeline is encoded in the device 708 * tree with of_graph, and we use it here to figure out which backend, if 709 * there are 2 or more, we are currently probing. The number would be in 710 * the "reg" property of the upstream output port endpoint. 711 */ 712 static int sun4i_backend_of_get_id(struct device_node *node) 713 { 714 struct device_node *ep, *remote; 715 struct of_endpoint of_ep; 716 717 /* Input port is 0, and we want the first endpoint. */ 718 ep = of_graph_get_endpoint_by_regs(node, 0, -1); 719 if (!ep) 720 return -EINVAL; 721 722 remote = of_graph_get_remote_endpoint(ep); 723 of_node_put(ep); 724 if (!remote) 725 return -EINVAL; 726 727 of_graph_parse_endpoint(remote, &of_ep); 728 of_node_put(remote); 729 return of_ep.id; 730 } 731 732 /* TODO: This needs to take multiple pipelines into account */ 733 static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv, 734 struct device_node *node) 735 { 736 struct device_node *port, *ep, *remote; 737 struct sun4i_frontend *frontend; 738 739 port = of_graph_get_port_by_id(node, 0); 740 if (!port) 741 return ERR_PTR(-EINVAL); 742 743 for_each_available_child_of_node(port, ep) { 744 remote = of_graph_get_remote_port_parent(ep); 745 if (!remote) 746 continue; 747 of_node_put(remote); 748 749 /* does this node match any registered engines? */ 750 list_for_each_entry(frontend, &drv->frontend_list, list) { 751 if (remote == frontend->node) { 752 of_node_put(port); 753 of_node_put(ep); 754 return frontend; 755 } 756 } 757 } 758 of_node_put(port); 759 return ERR_PTR(-EINVAL); 760 } 761 762 static const struct sunxi_engine_ops sun4i_backend_engine_ops = { 763 .atomic_begin = sun4i_backend_atomic_begin, 764 .atomic_check = sun4i_backend_atomic_check, 765 .commit = sun4i_backend_commit, 766 .layers_init = sun4i_layers_init, 767 .apply_color_correction = sun4i_backend_apply_color_correction, 768 .disable_color_correction = sun4i_backend_disable_color_correction, 769 .vblank_quirk = sun4i_backend_vblank_quirk, 770 }; 771 772 static struct regmap_config sun4i_backend_regmap_config = { 773 .reg_bits = 32, 774 .val_bits = 32, 775 .reg_stride = 4, 776 .max_register = 0x5800, 777 }; 778 779 static int sun4i_backend_bind(struct device *dev, struct device *master, 780 void *data) 781 { 782 struct platform_device *pdev = to_platform_device(dev); 783 struct drm_device *drm = data; 784 struct sun4i_drv *drv = drm->dev_private; 785 struct sun4i_backend *backend; 786 const struct sun4i_backend_quirks *quirks; 787 struct resource *res; 788 void __iomem *regs; 789 int i, ret; 790 791 backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL); 792 if (!backend) 793 return -ENOMEM; 794 dev_set_drvdata(dev, backend); 795 spin_lock_init(&backend->frontend_lock); 796 797 if (of_find_property(dev->of_node, "interconnects", NULL)) { 798 /* 799 * This assume we have the same DMA constraints for all our the 800 * devices in our pipeline (all the backends, but also the 801 * frontends). This sounds bad, but it has always been the case 802 * for us, and DRM doesn't do per-device allocation either, so 803 * we would need to fix DRM first... 804 */ 805 ret = of_dma_configure(drm->dev, dev->of_node, true); 806 if (ret) 807 return ret; 808 } else { 809 /* 810 * If we don't have the interconnect property, most likely 811 * because of an old DT, we need to set the DMA offset by hand 812 * on our device since the RAM mapping is at 0 for the DMA bus, 813 * unlike the CPU. 814 */ 815 drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET; 816 } 817 818 backend->engine.node = dev->of_node; 819 backend->engine.ops = &sun4i_backend_engine_ops; 820 backend->engine.id = sun4i_backend_of_get_id(dev->of_node); 821 if (backend->engine.id < 0) 822 return backend->engine.id; 823 824 backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node); 825 if (IS_ERR(backend->frontend)) 826 dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n"); 827 828 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 829 regs = devm_ioremap_resource(dev, res); 830 if (IS_ERR(regs)) 831 return PTR_ERR(regs); 832 833 backend->reset = devm_reset_control_get(dev, NULL); 834 if (IS_ERR(backend->reset)) { 835 dev_err(dev, "Couldn't get our reset line\n"); 836 return PTR_ERR(backend->reset); 837 } 838 839 ret = reset_control_deassert(backend->reset); 840 if (ret) { 841 dev_err(dev, "Couldn't deassert our reset line\n"); 842 return ret; 843 } 844 845 backend->bus_clk = devm_clk_get(dev, "ahb"); 846 if (IS_ERR(backend->bus_clk)) { 847 dev_err(dev, "Couldn't get the backend bus clock\n"); 848 ret = PTR_ERR(backend->bus_clk); 849 goto err_assert_reset; 850 } 851 clk_prepare_enable(backend->bus_clk); 852 853 backend->mod_clk = devm_clk_get(dev, "mod"); 854 if (IS_ERR(backend->mod_clk)) { 855 dev_err(dev, "Couldn't get the backend module clock\n"); 856 ret = PTR_ERR(backend->mod_clk); 857 goto err_disable_bus_clk; 858 } 859 860 ret = clk_set_rate_exclusive(backend->mod_clk, 300000000); 861 if (ret) { 862 dev_err(dev, "Couldn't set the module clock frequency\n"); 863 goto err_disable_bus_clk; 864 } 865 866 clk_prepare_enable(backend->mod_clk); 867 868 backend->ram_clk = devm_clk_get(dev, "ram"); 869 if (IS_ERR(backend->ram_clk)) { 870 dev_err(dev, "Couldn't get the backend RAM clock\n"); 871 ret = PTR_ERR(backend->ram_clk); 872 goto err_disable_mod_clk; 873 } 874 clk_prepare_enable(backend->ram_clk); 875 876 if (of_device_is_compatible(dev->of_node, 877 "allwinner,sun8i-a33-display-backend")) { 878 ret = sun4i_backend_init_sat(dev); 879 if (ret) { 880 dev_err(dev, "Couldn't init SAT resources\n"); 881 goto err_disable_ram_clk; 882 } 883 } 884 885 backend->engine.regs = devm_regmap_init_mmio(dev, regs, 886 &sun4i_backend_regmap_config); 887 if (IS_ERR(backend->engine.regs)) { 888 dev_err(dev, "Couldn't create the backend regmap\n"); 889 return PTR_ERR(backend->engine.regs); 890 } 891 892 list_add_tail(&backend->engine.list, &drv->engine_list); 893 894 /* 895 * Many of the backend's layer configuration registers have 896 * undefined default values. This poses a risk as we use 897 * regmap_update_bits in some places, and don't overwrite 898 * the whole register. 899 * 900 * Clear the registers here to have something predictable. 901 */ 902 for (i = 0x800; i < 0x1000; i += 4) 903 regmap_write(backend->engine.regs, i, 0); 904 905 /* Disable registers autoloading */ 906 regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG, 907 SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS); 908 909 /* Enable the backend */ 910 regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG, 911 SUN4I_BACKEND_MODCTL_DEBE_EN | 912 SUN4I_BACKEND_MODCTL_START_CTL); 913 914 /* Set output selection if needed */ 915 quirks = of_device_get_match_data(dev); 916 if (quirks->needs_output_muxing) { 917 /* 918 * We assume there is no dynamic muxing of backends 919 * and TCONs, so we select the backend with same ID. 920 * 921 * While dynamic selection might be interesting, since 922 * the CRTC is tied to the TCON, while the layers are 923 * tied to the backends, this means, we will need to 924 * switch between groups of layers. There might not be 925 * a way to represent this constraint in DRM. 926 */ 927 regmap_update_bits(backend->engine.regs, 928 SUN4I_BACKEND_MODCTL_REG, 929 SUN4I_BACKEND_MODCTL_OUT_SEL, 930 (backend->engine.id 931 ? SUN4I_BACKEND_MODCTL_OUT_LCD1 932 : SUN4I_BACKEND_MODCTL_OUT_LCD0)); 933 } 934 935 backend->quirks = quirks; 936 937 return 0; 938 939 err_disable_ram_clk: 940 clk_disable_unprepare(backend->ram_clk); 941 err_disable_mod_clk: 942 clk_rate_exclusive_put(backend->mod_clk); 943 clk_disable_unprepare(backend->mod_clk); 944 err_disable_bus_clk: 945 clk_disable_unprepare(backend->bus_clk); 946 err_assert_reset: 947 reset_control_assert(backend->reset); 948 return ret; 949 } 950 951 static void sun4i_backend_unbind(struct device *dev, struct device *master, 952 void *data) 953 { 954 struct sun4i_backend *backend = dev_get_drvdata(dev); 955 956 list_del(&backend->engine.list); 957 958 if (of_device_is_compatible(dev->of_node, 959 "allwinner,sun8i-a33-display-backend")) 960 sun4i_backend_free_sat(dev); 961 962 clk_disable_unprepare(backend->ram_clk); 963 clk_rate_exclusive_put(backend->mod_clk); 964 clk_disable_unprepare(backend->mod_clk); 965 clk_disable_unprepare(backend->bus_clk); 966 reset_control_assert(backend->reset); 967 } 968 969 static const struct component_ops sun4i_backend_ops = { 970 .bind = sun4i_backend_bind, 971 .unbind = sun4i_backend_unbind, 972 }; 973 974 static int sun4i_backend_probe(struct platform_device *pdev) 975 { 976 return component_add(&pdev->dev, &sun4i_backend_ops); 977 } 978 979 static int sun4i_backend_remove(struct platform_device *pdev) 980 { 981 component_del(&pdev->dev, &sun4i_backend_ops); 982 983 return 0; 984 } 985 986 static const struct sun4i_backend_quirks sun4i_backend_quirks = { 987 .needs_output_muxing = true, 988 }; 989 990 static const struct sun4i_backend_quirks sun5i_backend_quirks = { 991 }; 992 993 static const struct sun4i_backend_quirks sun6i_backend_quirks = { 994 }; 995 996 static const struct sun4i_backend_quirks sun7i_backend_quirks = { 997 .needs_output_muxing = true, 998 .supports_lowest_plane_alpha = true, 999 }; 1000 1001 static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = { 1002 .supports_lowest_plane_alpha = true, 1003 }; 1004 1005 static const struct sun4i_backend_quirks sun9i_backend_quirks = { 1006 }; 1007 1008 static const struct of_device_id sun4i_backend_of_table[] = { 1009 { 1010 .compatible = "allwinner,sun4i-a10-display-backend", 1011 .data = &sun4i_backend_quirks, 1012 }, 1013 { 1014 .compatible = "allwinner,sun5i-a13-display-backend", 1015 .data = &sun5i_backend_quirks, 1016 }, 1017 { 1018 .compatible = "allwinner,sun6i-a31-display-backend", 1019 .data = &sun6i_backend_quirks, 1020 }, 1021 { 1022 .compatible = "allwinner,sun7i-a20-display-backend", 1023 .data = &sun7i_backend_quirks, 1024 }, 1025 { 1026 .compatible = "allwinner,sun8i-a23-display-backend", 1027 .data = &sun8i_a33_backend_quirks, 1028 }, 1029 { 1030 .compatible = "allwinner,sun8i-a33-display-backend", 1031 .data = &sun8i_a33_backend_quirks, 1032 }, 1033 { 1034 .compatible = "allwinner,sun9i-a80-display-backend", 1035 .data = &sun9i_backend_quirks, 1036 }, 1037 { } 1038 }; 1039 MODULE_DEVICE_TABLE(of, sun4i_backend_of_table); 1040 1041 static struct platform_driver sun4i_backend_platform_driver = { 1042 .probe = sun4i_backend_probe, 1043 .remove = sun4i_backend_remove, 1044 .driver = { 1045 .name = "sun4i-backend", 1046 .of_match_table = sun4i_backend_of_table, 1047 }, 1048 }; 1049 module_platform_driver(sun4i_backend_platform_driver); 1050 1051 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>"); 1052 MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver"); 1053 MODULE_LICENSE("GPL"); 1054