/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
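
/*
 * A worked example of the conversion above, with illustrative values:
 * 0.5 in sign-magnitude S31.32 is 0x0000000080000000.  The sign bit is
 * clear and the integer bits (62:32) are all zero, so we keep fraction
 * bits 31:23, giving 0x100 (256/512 = 0.5 in S0.9).  -2.0 is
 * 0x8000000200000000: the sign bit sets BIT(9), and the non-zero
 * integer bits force saturation to the largest S0.9 magnitude, 0x1ff.
 */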

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_crtc *vc4_crtc;
	int i;

	/* Mask underrun interrupts on the channels of all CRTCs affected
	 * by this commit while the pipeline is being reconfigured.
	 */
	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
			continue;

		vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
		vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
	}

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}
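
/*
 * Note that vc4_atomic_complete_commit() above follows the standard
 * drm_atomic_helper_commit_tail() sequence: modeset disables, plane
 * update, modeset enables, then the hw_done/flip_done/cleanup
 * bookkeeping.  The driver-specific additions are the underrun masking
 * at the top and the vc4_ctm_commit() call, which programs the OLED
 * matrix registers between the disable and plane-update steps.
 */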

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has already been validated
 * with drm_atomic_helper_check(). It can still fail when e.g. the
 * framebuffer reservation fails. For now this doesn't implement
 * asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	/* We know for sure we don't want an async update here. Set
	 * state->legacy_cursor_update to false to prevent
	 * drm_atomic_helper_setup_commit() from auto-completing
	 * commit->flip_done.
	 */
	state->legacy_cursor_update = false;
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}
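
/*
 * vc4->async_modeset is a binary semaphore (initialized to 1 in
 * vc4_kms_load()) rather than a mutex because a nonblocking commit takes
 * it here in vc4_atomic_commit() but releases it from the workqueue in
 * vc4_atomic_complete_commit(), i.e. in a different context.  The effect
 * is that at most one commit is in flight at a time; a second commit
 * sleeps in down_interruptible() until the previous one has fully
 * completed.
 */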

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
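
/*
 * The fallback above exists for legacy userspace: a client using the old
 * ADDFB ioctl (which cannot pass modifiers) on a BO it previously flagged
 * as T-format via DRM_IOCTL_VC4_SET_TILING still gets a framebuffer with
 * the matching DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED modifier, so scanout
 * interprets the tiled layout correctly.
 */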

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i, j;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.  Use a separate index here so
			 * we don't clobber the
			 * for_each_oldnew_crtc_in_state() iterator.
			 */
			ctm = new_crtc_state->ctm->data;
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}
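
/*
 * A worked example of the |c| > 1.0 check above, with illustrative
 * values: 1.5 in sign-magnitude S31.32 is 0x180000000, which is greater
 * than BIT_ULL(32) (i.e. 1.0) once the sign bit is cleared, so the
 * commit is rejected.  Exactly 1.0 (0x100000000) is allowed through and
 * later saturates to the maximum S0.9 magnitude 0x1ff in
 * vc4_ctm_s31_32_to_s0_9().
 */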

static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250MHz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	struct vc4_load_tracker_state *load_state;
	int ret;

	/* Start with the load tracker enabled. Can be disabled through the
	 * debugfs load_tracker file.
	 */
	vc4->load_tracker_enabled = true;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	dev->irq_enabled = true;
	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state) {
		drm_atomic_private_obj_fini(&vc4->ctm_manager);
		return -ENOMEM;
	}

	drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
				    &vc4_load_tracker_state_funcs);

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}