// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
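/*
 * Worked examples for the conversion above: 0.5 in S31.32 is
 * 0x0_80000000; the integer bits 62:32 are zero, so the result is
 * (0x80000000 >> 23) & 0x1ff = 0x100, i.e. 256/512. -0.25 is encoded
 * sign-magnitude as BIT(63) | 0x40000000 and becomes BIT(9) | 0x080.
 * Any coefficient with a magnitude of 1.0 or more has integer bits
 * set and saturates to the largest S0.9 fraction, 0x1ff (511/512).
 */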
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}
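/*
 * The commit implementation below deviates from the plain helper
 * sequence in two ways: it first waits for any commit still pending on
 * the HVS FIFOs that are about to be reused, and on the BCM2711 it
 * keeps the HVS core clock high enough for the duration of the commit.
 */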
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	/*
	 * The core clock is kept at no less than 500 MHz while the new
	 * scene is being programmed; it is relaxed to the rate computed
	 * for that scene at the end of this function.
	 */
	if (vc4->hvs->hvs5) {
		unsigned long core_rate = max_t(unsigned long,
						500000000,
						new_hvs_state->core_clock_rate);

		clk_set_min_rate(hvs->core_clk, core_rate);
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->hvs->hvs5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->hvs->hvs5) {
		drm_dbg(dev, "Running the core clock at %lu Hz\n",
			new_hvs_state->core_clock_rate);

		clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate);
	}
}

static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run at 250 MHz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}
/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would start considering their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;
			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to
		 * change the muxing between FIFOs and outputs in
		 * the HVS accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the currently supported layouts, if we
		 * try to assign the FIFOs in ascending CRTC index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

		channel = ffs(matching_channels) - 1;
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}
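/*
 * Illustration of the ascending-order assignment above (the masks here
 * are made up; the real per-CRTC masks come from the platform data in
 * vc4_crtc.c): if one newly enabled CRTC can use channels 0-2 (mask
 * 0x7) and another can only use channel 2 (mask 0x4), assigning
 * channels in CRTC order with ffs() gives the first one channel 0 and
 * leaves channel 2 free for the more constrained one.
 */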
static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate += hvs_new_state->fifo_state[i].fifo_load;
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (!is_vc5) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}