// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned int unassigned_channels;
};

static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
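
/*
 * Worked example (illustrative only): 0.5 in S31.32 is 0x0000000080000000;
 * bits 62:32 are all zero, so the nine most significant fractional bits
 * (31:23) give 0x100, i.e. 256/512 = 0.5 in S0.9. Any coefficient with a
 * magnitude of 1.0 or more has integer bits set and saturates to 0x1ff.
 */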

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_state->feed_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 500000000);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->hvs->hvs5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 0);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */
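
	/*
	 * The reference taken below is dropped by drm_atomic_state_put() at
	 * the end of vc4_atomic_complete_commit(), once the commit has fully
	 * completed.
	 */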
464 */ 465 466 drm_atomic_state_get(state); 467 if (nonblock) 468 queue_work(system_unbound_wq, &state->commit_work); 469 else 470 vc4_atomic_complete_commit(state); 471 472 return 0; 473 } 474 475 static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, 476 struct drm_file *file_priv, 477 const struct drm_mode_fb_cmd2 *mode_cmd) 478 { 479 struct drm_mode_fb_cmd2 mode_cmd_local; 480 481 /* If the user didn't specify a modifier, use the 482 * vc4_set_tiling_ioctl() state for the BO. 483 */ 484 if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) { 485 struct drm_gem_object *gem_obj; 486 struct vc4_bo *bo; 487 488 gem_obj = drm_gem_object_lookup(file_priv, 489 mode_cmd->handles[0]); 490 if (!gem_obj) { 491 DRM_DEBUG("Failed to look up GEM BO %d\n", 492 mode_cmd->handles[0]); 493 return ERR_PTR(-ENOENT); 494 } 495 bo = to_vc4_bo(gem_obj); 496 497 mode_cmd_local = *mode_cmd; 498 499 if (bo->t_format) { 500 mode_cmd_local.modifier[0] = 501 DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED; 502 } else { 503 mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE; 504 } 505 506 drm_gem_object_put(gem_obj); 507 508 mode_cmd = &mode_cmd_local; 509 } 510 511 return drm_gem_fb_create(dev, file_priv, mode_cmd); 512 } 513 514 /* Our CTM has some peculiar limitations: we can only enable it for one CRTC 515 * at a time and the HW only supports S0.9 scalars. To account for the latter, 516 * we don't allow userland to set a CTM that we have no hope of approximating. 517 */ 518 static int 519 vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) 520 { 521 struct vc4_dev *vc4 = to_vc4_dev(dev); 522 struct vc4_ctm_state *ctm_state = NULL; 523 struct drm_crtc *crtc; 524 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 525 struct drm_color_ctm *ctm; 526 int i; 527 528 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 529 /* CTM is being disabled. */ 530 if (!new_crtc_state->ctm && old_crtc_state->ctm) { 531 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager); 532 if (IS_ERR(ctm_state)) 533 return PTR_ERR(ctm_state); 534 ctm_state->fifo = 0; 535 } 536 } 537 538 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 539 if (new_crtc_state->ctm == old_crtc_state->ctm) 540 continue; 541 542 if (!ctm_state) { 543 ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager); 544 if (IS_ERR(ctm_state)) 545 return PTR_ERR(ctm_state); 546 } 547 548 /* CTM is being enabled or the matrix changed. */ 549 if (new_crtc_state->ctm) { 550 struct vc4_crtc_state *vc4_crtc_state = 551 to_vc4_crtc_state(new_crtc_state); 552 553 /* fifo is 1-based since 0 disables CTM. */ 554 int fifo = vc4_crtc_state->assigned_channel + 1; 555 556 /* Check userland isn't trying to turn on CTM for more 557 * than one CRTC at a time. 558 */ 559 if (ctm_state->fifo && ctm_state->fifo != fifo) { 560 DRM_DEBUG_DRIVER("Too many CTM configured\n"); 561 return -EINVAL; 562 } 563 564 /* Check we can approximate the specified CTM. 565 * We disallow scalars |c| > 1.0 since the HW has 566 * no integer bits. 
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	if (!vc4->load_tracker_available)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250 MHz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
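	/*
	 * For reference, those two margins work out to SZ_1G + SZ_512M =
	 * 1.5 GByte/sec (75% of the 2 GByte/sec limit) and 240M cycles out
	 * of the 250M available each second (96%).
	 */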
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->load_tracker_available)
		return;

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	if (!vc4->load_tracker_available)
		return 0;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	state->unassigned_channels = old_state->unassigned_channels;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
751 * 752 * The naive (and our initial) implementation would just iterate over 753 * all the active CRTCs, try to find a suitable FIFO, and then remove it 754 * from the pool of available FIFOs. However, there are a few corner 755 * cases that need to be considered: 756 * 757 * - When running in a dual-display setup (so with two CRTCs involved), 758 * we can update the state of a single CRTC (for example by changing 759 * its mode using xrandr under X11) without affecting the other. In 760 * this case, the other CRTC wouldn't be in the state at all, so we 761 * need to consider all the running CRTCs in the DRM device to assign 762 * a FIFO, not just the one in the state. 763 * 764 * - To fix the above, we can't use drm_atomic_get_crtc_state on all 765 * enabled CRTCs to pull their CRTC state into the global state, since 766 * a page flip would start considering their vblank to complete. Since 767 * we don't have a guarantee that they are actually active, that 768 * vblank might never happen, and shouldn't even be considered if we 769 * want to do a page flip on a single CRTC. That can be tested by 770 * doing a modetest -v first on HDMI1 and then on HDMI0. 771 * 772 * - Since we need the pixelvalve to be disabled and enabled back when 773 * the FIFO is changed, we should keep the FIFO assigned for as long 774 * as the CRTC is enabled, only considering it free again once that 775 * CRTC has been disabled. This can be tested by booting X11 on a 776 * single display, and changing the resolution down and then back up. 777 */ 778 static int vc4_pv_muxing_atomic_check(struct drm_device *dev, 779 struct drm_atomic_state *state) 780 { 781 struct vc4_hvs_state *hvs_new_state; 782 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 783 struct drm_crtc *crtc; 784 unsigned int i; 785 786 hvs_new_state = vc4_hvs_get_global_state(state); 787 if (!hvs_new_state) 788 return -EINVAL; 789 790 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 791 struct vc4_crtc_state *old_vc4_crtc_state = 792 to_vc4_crtc_state(old_crtc_state); 793 struct vc4_crtc_state *new_vc4_crtc_state = 794 to_vc4_crtc_state(new_crtc_state); 795 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 796 unsigned int matching_channels; 797 798 /* Nothing to do here, let's skip it */ 799 if (old_crtc_state->enable == new_crtc_state->enable) 800 continue; 801 802 /* Muxing will need to be modified, mark it as such */ 803 new_vc4_crtc_state->update_muxing = true; 804 805 /* If we're disabling our CRTC, we put back our channel */ 806 if (!new_crtc_state->enable) { 807 hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel); 808 new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; 809 continue; 810 } 811 812 /* 813 * The problem we have to solve here is that we have 814 * up to 7 encoders, connected to up to 6 CRTCs. 815 * 816 * Those CRTCs, depending on the instance, can be 817 * routed to 1, 2 or 3 HVS FIFOs, and we need to set 818 * the change the muxing between FIFOs and outputs in 819 * the HVS accordingly. 820 * 821 * It would be pretty hard to come up with an 822 * algorithm that would generically solve 823 * this. However, the current routing trees we support 824 * allow us to simplify a bit the problem. 
825 * 826 * Indeed, with the current supported layouts, if we 827 * try to assign in the ascending crtc index order the 828 * FIFOs, we can't fall into the situation where an 829 * earlier CRTC that had multiple routes is assigned 830 * one that was the only option for a later CRTC. 831 * 832 * If the layout changes and doesn't give us that in 833 * the future, we will need to have something smarter, 834 * but it works so far. 835 */ 836 matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels; 837 if (matching_channels) { 838 unsigned int channel = ffs(matching_channels) - 1; 839 840 new_vc4_crtc_state->assigned_channel = channel; 841 hvs_new_state->unassigned_channels &= ~BIT(channel); 842 } else { 843 return -EINVAL; 844 } 845 } 846 847 return 0; 848 } 849 850 static int 851 vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) 852 { 853 int ret; 854 855 ret = vc4_pv_muxing_atomic_check(dev, state); 856 if (ret) 857 return ret; 858 859 ret = vc4_ctm_atomic_check(dev, state); 860 if (ret < 0) 861 return ret; 862 863 ret = drm_atomic_helper_check(dev, state); 864 if (ret) 865 return ret; 866 867 return vc4_load_tracker_atomic_check(state); 868 } 869 870 static const struct drm_mode_config_funcs vc4_mode_funcs = { 871 .atomic_check = vc4_atomic_check, 872 .atomic_commit = vc4_atomic_commit, 873 .fb_create = vc4_fb_create, 874 }; 875 876 int vc4_kms_load(struct drm_device *dev) 877 { 878 struct vc4_dev *vc4 = to_vc4_dev(dev); 879 bool is_vc5 = of_device_is_compatible(dev->dev->of_node, 880 "brcm,bcm2711-vc5"); 881 int ret; 882 883 if (!is_vc5) { 884 vc4->load_tracker_available = true; 885 886 /* Start with the load tracker enabled. Can be 887 * disabled through the debugfs load_tracker file. 888 */ 889 vc4->load_tracker_enabled = true; 890 } 891 892 sema_init(&vc4->async_modeset, 1); 893 894 /* Set support for vblank irq fast disable, before drm_vblank_init() */ 895 dev->vblank_disable_immediate = true; 896 897 dev->irq_enabled = true; 898 ret = drm_vblank_init(dev, dev->mode_config.num_crtc); 899 if (ret < 0) { 900 dev_err(dev->dev, "failed to initialize vblank\n"); 901 return ret; 902 } 903 904 if (is_vc5) { 905 dev->mode_config.max_width = 7680; 906 dev->mode_config.max_height = 7680; 907 } else { 908 dev->mode_config.max_width = 2048; 909 dev->mode_config.max_height = 2048; 910 } 911 912 dev->mode_config.funcs = &vc4_mode_funcs; 913 dev->mode_config.preferred_depth = 24; 914 dev->mode_config.async_page_flip = true; 915 dev->mode_config.allow_fb_modifiers = true; 916 917 ret = vc4_ctm_obj_init(vc4); 918 if (ret) 919 return ret; 920 921 ret = vc4_load_tracker_obj_init(vc4); 922 if (ret) 923 return ret; 924 925 ret = vc4_hvs_channels_obj_init(vc4); 926 if (ret) 927 return ret; 928 929 drm_mode_config_reset(dev); 930 931 drm_kms_helper_poll_init(dev); 932 933 return 0; 934 } 935