// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *
to_vc4_ctm_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_hvs_state {
	struct drm_private_state base;
	unsigned long core_clock_rate;

	struct {
		unsigned in_use: 1;
		unsigned long fifo_load;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(const struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}
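
/*
 * Worked example for the conversion below: the DRM CTM coefficients are
 * S31.32 fixed point (sign in bit 63, 31 integer bits, 32 fractional bits),
 * while the SCALER_OLEDCOEF registers take S0.9 (a sign bit plus 9
 * fractional bits). For instance, 0.5 is 0x0000000080000000 in S31.32;
 * shifting the fraction right by 23 keeps its 9 most significant bits,
 * giving 0x100, i.e. 256/512 = 0.5 in S0.9.
 */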
/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;
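
		/*
		 * Note: as the logic below shows, only the DSP3 output mux is
		 * reprogrammed here, and it only ever sources FIFO 2 (or is
		 * disabled), so CRTCs assigned to any other channel can be
		 * skipped.
		 */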
		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 3 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct vc4_hvs_state *new_hvs_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
	}
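
	/*
	 * Before reprogramming the muxing or the planes, flush any commit
	 * from the previous state that is still pending on an in-use FIFO,
	 * so that a channel is not rerouted while its previous user may
	 * still be scanning out.
	 */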
	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->is_vc5) {
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = max_t(unsigned long,
						500000000, state_rate);

		drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->is_vc5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->is_vc5) {
		drm_dbg(dev, "Running the core clock at %lu Hz\n",
			new_hvs_state->core_clock_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, new_hvs_state->core_clock_rate));

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}

static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}
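
/*
 * Note: vc4_atomic_commit_setup() above is called from
 * drm_atomic_helper_setup_commit(), i.e. before the commit is queued and
 * while the state is still protected by the modeset locks. Stashing a
 * reference to each CRTC's commit in the per-FIFO state is what allows
 * vc4_atomic_commit_tail() to later wait for the previous user of a FIFO.
 */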

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return ERR_PTR(-ENODEV);

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTM configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
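			/*
			 * The CTM entries are S31.32 sign-magnitude, so
			 * clearing bit 63 below leaves the magnitude; 1.0 is
			 * BIT_ULL(32), and anything larger would need integer
			 * bits the HW doesn't have, hence the rejection.
			 */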
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;
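
	/*
	 * Note on the threshold above: SZ_1G + SZ_512M is 1.5 GB/s, i.e. 75%
	 * of the 2 GB/s absolute limit mentioned in the comment.
	 */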

	/* HVS clock is supposed to run at 250 MHz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static void vc4_hvs_channels_print_state(struct drm_printer *p,
					 const struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	drm_printf(p, "HVS State\n");
	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		drm_printf(p, "\tChannel %d\n", i);
		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
	}
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};
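
/*
 * The vc4_hvs_state above is a drm_private_obj state, so it follows the
 * usual atomic flow: it is duplicated during atomic check, swapped in on
 * commit and freed with the old state, which keeps the FIFO assignments
 * consistent with whatever CRTC states land in the same commit.
 */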

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   the commit would then also wait for a vblank on those CRTCs. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and it shouldn't be waited for at all
 *   when we only want to do a page flip on a single CRTC. That can be
 *   tested by doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable) {
			if (new_crtc_state->enable)
				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
					crtc->name, new_vc4_crtc_state->assigned_channel);
			else
				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

			continue;
		}

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;

			drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
				crtc->name, channel);

			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to set
		 * the muxing between FIFOs and outputs in the HVS
		 * accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the current supported layouts, if we
		 * try to assign the FIFOs in ascending CRTC index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

		channel = ffs(matching_channels) - 1;

		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}

static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate = max_t(unsigned long,
				 hvs_new_state->fifo_state[i].fifo_load,
				 cob_rate);
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

static const struct drm_mode_config_funcs vc5_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = drm_gem_fb_create,
};
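
/*
 * The two sets of mode_config funcs above only differ in fb_create: the VC4
 * variant goes through vc4_fb_create() so that framebuffers created without
 * an explicit modifier pick up the T_TILED layout selected through
 * vc4_set_tiling_ioctl(), while the BCM2711 ("vc5") variant has no such
 * per-BO tiling state and uses plain drm_gem_fb_create().
 */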

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (!vc4->is_vc5) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (vc4->is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}