/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>
#include <drm/drm_damage_helper.h>
#include <linux/dma-fence.h>

#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * This helper library provides implementations of check and commit functions on
 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
 * also provides convenience implementations for the atomic state handling
 * callbacks for drivers which don't need to subclass the drm core structures to
 * add their own additional internal state.
 *
 * This library also provides default implementations for the check callback in
 * drm_atomic_helper_check() and for the commit callback with
 * drm_atomic_helper_commit(). But the individual stages and callbacks are
 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
 * together with a driver private modeset implementation.
 *
 * This library also provides implementations for all the legacy driver
 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane() and the various functions to implement
 * set_property callbacks. New drivers must not implement these functions
 * themselves but must use the provided helpers.
 *
 * The atomic helper uses the same function table structures as all other
 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
 * also shares the &struct drm_plane_helper_funcs function table with the plane
 * helpers.
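 *
 * As a rough sketch only (the foo_* names are invented for illustration), a
 * driver wiring its CRTCs into the atomic helpers fills in the shared function
 * tables roughly like this::
 *
 *     static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *             .mode_set_nofb = foo_crtc_mode_set_nofb,
 *             .atomic_check = foo_crtc_atomic_check,
 *             .atomic_enable = foo_crtc_atomic_enable,
 *             .atomic_disable = foo_crtc_atomic_disable,
 *     };
 *
 *     drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);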
 */
static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *plane_state,
				struct drm_plane *plane)
{
	struct drm_crtc_state *crtc_state;

	if (old_plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   old_plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}
}

/*
 * For connectors that support multiple encoders, either the
 * .atomic_best_encoder() or .best_encoder() operation must be implemented.
 */
static struct drm_encoder *
pick_single_encoder_for_connector(struct drm_connector *connector)
{
	WARN_ON(connector->encoder_ids[1]);
	return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
}

static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = pick_single_encoder_for_connector(connector);

		if (new_encoder) {
			if (encoder_mask & drm_encoder_mask(new_encoder)) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
						 new_encoder->base.id, new_encoder->name,
						 connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= drm_encoder_mask(new_encoder);
		}
	}

	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the crtc
	 * and the crtc is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
			continue;

		if (!disable_conflicting_encoders) {
			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
					 encoder->base.id, encoder->name,
					 connector->state->crtc->base.id,
					 connector->state->crtc->name,
					 connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
				 encoder->base.id, encoder->name,
				 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
				 connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~drm_encoder_mask(conn_state->best_encoder);
		}
	}

	if (encoder) {
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				drm_encoder_mask(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		encoder_crtc = old_connector_state->crtc;

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
				 encoder->base.id, encoder->name,
				 encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}

static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
			 connector->base.id,
			 connector->name);

	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);

		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_connector_state->crtc);
	/*
	 * For compatibility with legacy users, we want to make sure that
	 * we allow DPMS On->Off modesets on unregistered connectors. Modesets
	 * which would result in anything else must be considered invalid, to
	 * avoid turning on new displays on dead connectors.
	 *
	 * Since the connector can be unregistered at any point during an
	 * atomic check or commit, this is racy. But that's OK: all we care
	 * about is ensuring that userspace can't do anything but shut off the
	 * display on a connector that was destroyed after it's been notified,
	 * not before.
	 */
	if (drm_connector_is_unregistered(connector) && crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	funcs = connector->helper_private;

	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector,
							 new_connector_state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = pick_single_encoder_for_connector(connector);

	if (!new_encoder) {
		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);
		return -EINVAL;
	}

	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
				 connector->base.id,
				 connector->name,
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state->connectors_changed = true;

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 new_encoder->base.id,
			 new_encoder->name,
			 new_connector_state->crtc->base.id,
			 new_connector_state->crtc->name);

	return 0;
}

static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
					    &new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
			return -EINVAL;
		}

		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
						 encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
						 encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->enable)
			continue;

		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		funcs = crtc->helper_private;
		if (!funcs->mode_fixup)
			continue;

		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
					&new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
					 crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}

	return 0;
}

static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
					    struct drm_encoder *encoder,
					    struct drm_crtc *crtc,
					    struct drm_display_mode *mode)
{
	enum drm_mode_status ret;

	ret = drm_encoder_mode_valid(encoder, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
				 encoder->base.id, encoder->name);
		return ret;
	}

	ret = drm_bridge_mode_valid(encoder->bridge, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
		return ret;
	}

	ret = drm_crtc_mode_valid(crtc, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
				 crtc->base.id, crtc->name);
		return ret;
	}

	return ret;
}

static int
mode_valid(struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		struct drm_encoder *encoder = conn_state->best_encoder;
		struct drm_crtc *crtc = conn_state->crtc;
		struct drm_crtc_state *crtc_state;
		enum drm_mode_status mode_status;
		struct drm_display_mode *mode;

		if (!crtc || !encoder)
			continue;

		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!crtc_state)
			continue;
		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
			continue;

		mode = &crtc_state->mode;

		mode_status = mode_valid_path(connector, encoder, crtc, mode);
		if (mode_status != MODE_OK)
			return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_helper_check_modeset - validate state object for modeset changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the crtc and connector related computations for an atomic
 * update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
 *
 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
 * 3. If it's determined a modeset is needed then all connectors on the affected
 *    crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
 *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 *    This function is only called when the encoder will be part of a configured crtc,
 *    it must not be used for implementing connector property validation.
 *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
 *    instead.
 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
 *
 * &drm_crtc_state.mode_changed is set when the input mode is changed.
 * &drm_crtc_state.connectors_changed is set when a connector is added or
 * removed from the crtc. &drm_crtc_state.active_changed is set when
 * &drm_crtc_state.active changes, which is used for DPMS.
 * See also: drm_atomic_crtc_needs_modeset()
 *
 * IMPORTANT:
 *
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call this function after that change. It is
 * permitted to call this function multiple times for the same update, e.g.
 * when the &drm_crtc_helper_funcs.atomic_check functions depend upon the
 * adjusted dotclock for fifo space allocation and watermark computation.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i, ret;
	unsigned connectors_mask = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		bool has_connectors =
			!!new_crtc_state->connector_mask;

		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
					 crtc->base.id, crtc->name);
			new_crtc_state->mode_changed = true;
		}

		if (old_crtc_state->enable != new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
					 crtc->base.id, crtc->name);

			/*
			 * For clarity this assignment is done here, but
			 * enable == 0 is only true when there are no
			 * connectors and a NULL mode.
			 *
			 * The other way around is true as well. enable != 0
			 * iff connectors are attached and a mode is set.
			 */
			new_crtc_state->mode_changed = true;
			new_crtc_state->connectors_changed = true;
		}

		if (old_crtc_state->active != new_crtc_state->active) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
					 crtc->base.id, crtc->name);
			new_crtc_state->active_changed = true;
		}

		if (new_crtc_state->enable != has_connectors) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
					 crtc->base.id, crtc->name);

			return -EINVAL;
		}
	}

	ret = handle_conflicting_encoders(state, false);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

		/*
		 * This only sets crtc->connectors_changed for routing changes,
		 * drivers must set crtc->connectors_changed themselves when
		 * connector properties need to be updated.
		 */
		ret = update_connector_routing(state, connector,
					       old_connector_state,
					       new_connector_state);
		if (ret)
			return ret;
		if (old_connector_state->crtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       old_connector_state->crtc);
			if (old_connector_state->link_status !=
			    new_connector_state->link_status)
				new_crtc_state->connectors_changed = true;

			if (old_connector_state->max_requested_bpc !=
			    new_connector_state->max_requested_bpc)
				new_crtc_state->connectors_changed = true;
		}

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, new_connector_state);
		if (ret)
			return ret;

		connectors_mask |= BIT(i);
	}

	/*
	 * After all the routing has been prepared we need to add in any
	 * connector which is itself unchanged, but whose crtc changes its
	 * configuration. This must be done before calling mode_fixup in case a
	 * crtc only changed its mode but has the same set of connectors.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
				 crtc->base.id, crtc->name,
				 new_crtc_state->enable ? 'y' : 'n',
				 new_crtc_state->active ? 'y' : 'n');

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret != 0)
			return ret;
	}

	/*
	 * Iterate over all connectors again, to make sure atomic_check()
	 * has been called on them when a modeset is forced.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		if (connectors_mask & BIT(i))
			continue;

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, new_connector_state);
		if (ret)
			return ret;
	}

	ret = mode_valid(state);
	if (ret)
		return ret;

	return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);

/**
 * drm_atomic_helper_check_plane_state() - Check plane state for validity
 * @plane_state: plane state to check
 * @crtc_state: crtc state to check
 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
 * @can_position: is it legal to position the plane such that it
 *                doesn't cover the entire crtc? This will generally
 *                only be false for primary planes.
 * @can_update_disabled: can the plane be updated while the crtc
 *                       is disabled?
 *
 * Checks that a desired plane update is valid, and updates various
 * bits of derived state (clipped coordinates etc.). Drivers that provide
 * their own plane handling rather than helper-provided implementations may
 * still wish to call this function to avoid duplication of error checking
 * code.
 *
 * RETURNS:
 * Zero if update appears valid, error code on failure
 */
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
					const struct drm_crtc_state *crtc_state,
					int min_scale,
					int max_scale,
					bool can_position,
					bool can_update_disabled)
{
	struct drm_framebuffer *fb = plane_state->fb;
	struct drm_rect *src = &plane_state->src;
	struct drm_rect *dst = &plane_state->dst;
	unsigned int rotation = plane_state->rotation;
	struct drm_rect clip = {};
	int hscale, vscale;

	WARN_ON(plane_state->crtc && plane_state->crtc != crtc_state->crtc);

	*src = drm_plane_state_src(plane_state);
	*dst = drm_plane_state_dest(plane_state);

	if (!fb) {
		plane_state->visible = false;
		return 0;
	}

	/* crtc should only be NULL when disabling (i.e., !fb) */
	if (WARN_ON(!plane_state->crtc)) {
		plane_state->visible = false;
		return 0;
	}

	if (!crtc_state->enable && !can_update_disabled) {
		DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
		return -EINVAL;
	}

	drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);

	/* Check scaling */
	hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
	if (hscale < 0 || vscale < 0) {
		DRM_DEBUG_KMS("Invalid scaling of plane\n");
		drm_rect_debug_print("src: ", &plane_state->src, true);
		drm_rect_debug_print("dst: ", &plane_state->dst, false);
		return -ERANGE;
	}

	if (crtc_state->enable)
		drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2);

	plane_state->visible = drm_rect_clip_scaled(src, dst, &clip);

	drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);

	if (!plane_state->visible)
		/*
		 * Plane isn't visible; some drivers can handle this
		 * so we just return success here. Drivers that can't
		 * (including those that use the primary plane helper's
		 * update function) will return an error from their
		 * update_plane handler.
		 */
		return 0;

	if (!can_position && !drm_rect_equals(dst, &clip)) {
		DRM_DEBUG_KMS("Plane must cover entire CRTC\n");
		drm_rect_debug_print("dst: ", dst, false);
		drm_rect_debug_print("clip: ", &clip, false);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);

/**
 * drm_atomic_helper_check_planes - validate state object for planes changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
 * hooks provided by the driver.
 *
 * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
 * updated planes.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_planes(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;
	int i, ret = 0;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		WARN_ON(!drm_modeset_is_locked(&plane->mutex));

		funcs = plane->helper_private;

		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);

		drm_atomic_helper_check_plane_damage(state, new_plane_state);

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(plane, new_plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);

/**
 * drm_atomic_helper_check - validate state object
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * Only crtcs and planes have check callbacks, so for any additional (global)
 * checking that a driver needs it can simply wrap that around this function.
 * Drivers without such needs can directly use this as their
 * &drm_mode_config_funcs.atomic_check callback.
 *
 * This just wraps the two parts of the state checking for planes and modeset
 * state in the default order: First it calls drm_atomic_helper_check_modeset()
 * and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
 * watermarks.
 *
 * Note that zpos normalization will add all enabled planes to the state, which
 * might not be desired for some drivers. For example, enabling/disabling a
 * cursor plane with a fixed zpos value would force all other enabled planes
 * into the state change.
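 *
 * As a minimal sketch (the structure name is invented for illustration),
 * drivers without extra global checks can plug this straight into their mode
 * config funcs::
 *
 *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *             .atomic_check = drm_atomic_helper_check,
 *             .atomic_commit = drm_atomic_helper_commit,
 *     };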
 *
 * RETURNS:
 * Zero for success or -errno
 */
int drm_atomic_helper_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	if (dev->mode_config.normalize_zpos) {
		ret = drm_atomic_normalize_zpos(dev, state);
		if (ret)
			return ret;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	if (state->legacy_cursor_update)
		state->async_update = !drm_atomic_helper_async_check(dev, state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);

static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		/* Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state. */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);

		if (!old_crtc_state->active ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/* We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		drm_bridge_disable(encoder->bridge);

		/* Right function depends upon target state. */
		if (funcs) {
			if (new_conn_state->crtc && funcs->prepare)
				funcs->prepare(encoder);
			else if (funcs->disable)
				funcs->disable(encoder);
			else if (funcs->dpms)
				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
		}

		drm_bridge_post_disable(encoder->bridge);
	}

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;
		int ret;

		/* Shut down everything that needs a full modeset. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
				 crtc->base.id, crtc->name);

		/* Right function depends upon target state. */
		if (new_crtc_state->enable && funcs->prepare)
			funcs->prepare(crtc);
		else if (funcs->atomic_disable)
			funcs->atomic_disable(crtc, old_crtc_state);
		else if (funcs->disable)
			funcs->disable(crtc);
		else
			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

		if (!(dev->irq_enabled && dev->num_crtcs))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		WARN_ONCE(ret != -EINVAL, "driver forgot to call drm_crtc_vblank_off()\n");
		if (ret == 0)
			drm_crtc_vblank_put(crtc);
	}
}

/**
 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function updates all the various legacy modeset state pointers in
 * connectors, encoders and crtcs. It also updates the timestamping constants
 * used for precise vblank timestamps by calling
 * drm_calc_timestamping_constants().
 *
 * Drivers can use this for building their own atomic commit if they don't have
 * a pure helper-based modeset implementation.
 *
 * Since these updates are not synchronized with any locking, only code paths
 * called from &drm_mode_config_helper_funcs.atomic_commit_tail can look at the
 * legacy state filled out by this helper. De facto this means this helper and
 * the legacy state pointers are only really useful for transitioning an
 * existing driver to the atomic world.
 */
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* clear out existing links and update dpms */
	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
		if (connector->encoder) {
			WARN_ON(!connector->encoder->crtc);

			connector->encoder->crtc = NULL;
			connector->encoder = NULL;
		}

		crtc = new_conn_state->crtc;
		if ((!crtc && old_conn_state->crtc) ||
		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
			int mode = DRM_MODE_DPMS_OFF;

			if (crtc && crtc->state->active)
				mode = DRM_MODE_DPMS_ON;

			connector->dpms = mode;
		}
	}

	/* set new links */
	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		if (!new_conn_state->crtc)
			continue;

		if (WARN_ON(!new_conn_state->best_encoder))
			continue;

		connector->encoder = new_conn_state->best_encoder;
		connector->encoder->crtc = new_conn_state->crtc;
	}

	/* set legacy state in the crtc structure */
	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		struct drm_plane *primary = crtc->primary;
		struct drm_plane_state *new_plane_state;

		crtc->mode = new_crtc_state->mode;
		crtc->enabled = new_crtc_state->enable;

		new_plane_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		if (new_plane_state && new_plane_state->crtc == crtc) {
			crtc->x = new_plane_state->src_x >> 16;
			crtc->y = new_plane_state->src_y >> 16;
		}

		if (new_crtc_state->enable)
			drm_calc_timestamping_constants(crtc,
							&new_crtc_state->adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);

static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->mode_changed)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable && funcs->mode_set_nofb) {
			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			funcs->mode_set_nofb(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_display_mode *mode, *adjusted_mode;

		if (!new_conn_state->best_encoder)
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;
		new_crtc_state = new_conn_state->crtc->state;
		mode = &new_crtc_state->mode;
		adjusted_mode = &new_crtc_state->adjusted_mode;

		if (!new_crtc_state->mode_changed)
			continue;

		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call mode_set hooks twice.
		 */
		if (funcs && funcs->atomic_mode_set) {
			funcs->atomic_mode_set(encoder, new_crtc_state,
					       new_conn_state);
		} else if (funcs && funcs->mode_set) {
			funcs->mode_set(encoder, mode, adjusted_mode);
		}

		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
	}
}

/**
 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function shuts down all the outputs that need to be shut down and
 * prepares them (if required) with the new mode.
 *
 * For compatibility with legacy crtc helpers this should be called before
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
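 *
 * As a rough sketch, the default ordering used by drm_atomic_helper_commit_tail()
 * looks like::
 *
 *     drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *     drm_atomic_helper_commit_planes(dev, old_state, 0);
 *     drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 * whereas drivers grouping the modeset commits together can commit the planes
 * last, as drm_atomic_helper_commit_tail_rpm() does.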
 */
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
					       struct drm_atomic_state *old_state)
{
	disable_outputs(dev, old_state);

	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);

	crtc_set_mode(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);

static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
						struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs;

		funcs = connector->helper_private;
		if (!funcs->atomic_commit)
			continue;

		if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
			WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
			funcs->atomic_commit(connector, new_conn_state);
		}
	}
}

/**
 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function enables all the outputs with the new configuration which had to
 * be turned off for the update.
 *
 * For compatibility with legacy crtc helpers this should be called after
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		/* Need to filter out CRTCs where only planes change. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!new_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			if (funcs->atomic_enable)
				funcs->atomic_enable(crtc, old_crtc_state);
			else
				funcs->commit(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		if (!new_conn_state->best_encoder)
			continue;

		if (!new_conn_state->crtc->state->active ||
		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call enable hooks twice.
		 */
		drm_bridge_pre_enable(encoder->bridge);

		if (funcs) {
			if (funcs->enable)
				funcs->enable(encoder);
			else if (funcs->commit)
				funcs->commit(encoder);
		}

		drm_bridge_enable(encoder->bridge);
	}

	drm_atomic_helper_commit_writebacks(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);

/**
 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
 * @dev: DRM device
 * @state: atomic state object with old state structures
 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
 *	Otherwise @state is the old state.
 *
 * For implicit sync, drivers should fish the exclusive fence out from the
 * incoming fb's and stash it in the drm_plane_state. This is called after
 * drm_atomic_helper_swap_state() so it uses the current plane state (and
 * just uses the atomic state to find the changed planes).
 *
 * Note that @pre_swap is needed since the point where we block for fences moves
 * around depending upon whether an atomic commit is blocking or
 * non-blocking. For non-blocking commit all waiting needs to happen after
 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
 * to wait **before** we do anything that can't be easily rolled back. That is
 * before we call drm_atomic_helper_swap_state().
 *
 * Returns zero if success or < 0 if dma_fence_wait() fails.
 */
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
				      struct drm_atomic_state *state,
				      bool pre_swap)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i, ret;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		if (!new_plane_state->fence)
			continue;

		WARN_ON(!new_plane_state->fb);

		/*
		 * If waiting for fences pre-swap (ie: nonblock), userspace can
		 * still interrupt the operation. Instead of blocking until the
		 * timer expires, make the wait interruptible.
		 */
		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
		if (ret)
			return ret;

		dma_fence_put(new_plane_state->fence);
		new_plane_state->fence = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);

/**
 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * Helper to, after atomic commit, wait for vblanks on all affected
 * crtcs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
 * framebuffers have actually changed to optimize for the legacy cursor and
 * plane update use-case.
 *
 * Drivers using the nonblocking commit tracking support initialized by calling
 * drm_atomic_helper_setup_commit() should look at
 * drm_atomic_helper_wait_for_flip_done() as an alternative.
 */
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
				   struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i, ret;
	unsigned crtc_mask = 0;

	/*
	 * Legacy cursor ioctls are completely unsynced, and userspace
	 * relies on that (by doing tons of cursor updates).
	 */
	if (old_state->legacy_cursor_update)
		return;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret != 0)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
	}

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (!(crtc_mask & drm_crtc_mask(crtc)))
			continue;

		ret = wait_event_timeout(dev->vblank[i].queue,
					 old_state->crtcs[i].last_vblank_count !=
						drm_crtc_vblank_count(crtc),
					 msecs_to_jiffies(50));

		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
		     crtc->base.id, crtc->name);

		drm_crtc_vblank_put(crtc);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);

/**
 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * Helper to, after atomic commit, wait for page flips on all affected
 * crtcs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). Compared to
 * drm_atomic_helper_wait_for_vblanks() this waits for the page flip completion
 * on all CRTCs, assuming that cursor-only updates are signalling their
 * completion immediately (or using a different path).
 *
 * This requires that drivers use the nonblocking commit tracking support
 * initialized using drm_atomic_helper_setup_commit().
 */
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
					  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	int i;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
		int ret;

		crtc = old_state->crtcs[i].ptr;

		if (!crtc || !commit)
			continue;

		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
				  crtc->base.id, crtc->name);
	}

	if (old_state->fake_commit)
		complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);

/**
 * drm_atomic_helper_commit_tail - commit atomic update to hardware
 * @old_state: atomic state object with old state structures
 *
 * This is the default implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that do not support runtime_pm or do not need the CRTC to be
 * enabled to perform a commit. Otherwise, see
 * drm_atomic_helper_commit_tail_rpm().
 *
 * Note that the default ordering of how the various stages are called is to
 * match the legacy modeset helper library as closely as possible.
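 *
 * Drivers that need a different order can provide their own hook instead; a
 * minimal sketch (the foo_* names are invented for illustration)::
 *
 *     static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *             .atomic_commit_tail = foo_atomic_commit_tail,
 *     };
 *
 * which the commit machinery picks up through &drm_mode_config.helper_private.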
 */
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_fake_vblank(old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);

/**
 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
 * @old_state: new modeset state to be committed
 *
 * This is an alternative implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that support runtime_pm or need the CRTC to be enabled to perform a
 * commit. Otherwise, one should use the default implementation
 * drm_atomic_helper_commit_tail().
 */
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_fake_vblank(old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);

static void commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	const struct drm_mode_config_helper_funcs *funcs;

	funcs = dev->mode_config.helper_private;

	drm_atomic_helper_wait_for_fences(dev, old_state, false);

	drm_atomic_helper_wait_for_dependencies(old_state);

	if (funcs && funcs->atomic_commit_tail)
		funcs->atomic_commit_tail(old_state);
	else
		drm_atomic_helper_commit_tail(old_state);

	drm_atomic_helper_commit_cleanup_done(old_state);

	drm_atomic_state_put(old_state);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	commit_tail(state);
}

/**
 * drm_atomic_helper_async_check - check if state can be committed asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This helper will check if it is possible to commit the state asynchronously.
 * Async commits are not supposed to swap the states like normal sync commits
 * but just do in-place changes on the current state.
 *
 * It will return 0 if the commit can happen in an asynchronous fashion or error
 * if not. Note that an error just means it can't be committed asynchronously;
 * if it fails, the commit should be treated like a normal synchronous commit.
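 *
 * Drivers opt in to async updates by filling in the async hooks of their plane
 * helpers; a rough sketch (the foo_* names are invented for illustration)::
 *
 *     static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
 *             .atomic_check = foo_plane_atomic_check,
 *             .atomic_async_check = foo_plane_atomic_async_check,
 *             .atomic_async_update = foo_plane_atomic_async_update,
 *     };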
 */
int drm_atomic_helper_async_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane = NULL;
	struct drm_plane_state *old_plane_state = NULL;
	struct drm_plane_state *new_plane_state = NULL;
	const struct drm_plane_helper_funcs *funcs;
	int i, n_planes = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return -EINVAL;
	}

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		n_planes++;

	/* FIXME: we support only single plane updates for now */
	if (n_planes != 1)
		return -EINVAL;

	if (!new_plane_state->crtc ||
	    old_plane_state->crtc != new_plane_state->crtc)
		return -EINVAL;

	/*
	 * FIXME: Since prepare_fb and cleanup_fb are always called on
	 * the new_plane_state for async updates we need to block framebuffer
	 * changes. This prevents use of a fb that's been cleaned up and
	 * double cleanups from occurring.
	 */
	if (old_plane_state->fb != new_plane_state->fb)
		return -EINVAL;

	funcs = plane->helper_private;
	if (!funcs->atomic_async_update)
		return -EINVAL;

	if (new_plane_state->fence)
		return -EINVAL;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		return -EBUSY;

	return funcs->atomic_async_check(plane, new_plane_state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);

/**
 * drm_atomic_helper_async_commit - commit state asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This function commits a state asynchronously, i.e., not vblank
 * synchronized. It should be used on a state only when
 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
 * the states like normal sync commits, but just do in-place changes on the
 * current state.
 */
void drm_atomic_helper_async_commit(struct drm_device *dev,
				    struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	const struct drm_plane_helper_funcs *funcs;
	int i;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		funcs = plane->helper_private;
		funcs->atomic_async_update(plane, plane_state);

		/*
		 * ->atomic_async_update() is supposed to update the
		 * plane->state in-place, make sure at least common
		 * properties have been properly updated.
		 */
		WARN_ON_ONCE(plane->state->fb != plane_state->fb);
		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);

/**
 * drm_atomic_helper_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: whether nonblocking behavior is requested.
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. This function implements nonblocking commits, using
 * drm_atomic_helper_setup_commit() and related functions.
 *
 * Committing the actual hardware state is done through the
 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
 * implementation drm_atomic_helper_commit_tail().
 *
 * RETURNS:
 * Zero for success or -errno.
 */
int drm_atomic_helper_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	if (state->async_update) {
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 *
	 * NOTE: Commit work has multiple phases, first hardware commit, then
	 * cleanup. We want them to overlap, hence need system_unbound_wq to
	 * make sure work items don't artificially stall on each other.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;

err:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);

/**
 * DOC: implementing nonblocking commit
 *
 * Nonblocking atomic commits have to be implemented in the following sequence:
 *
 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
 * which commit needs to call which can fail, so we want to run it first and
 * synchronously.
 *
 * 2. Synchronize with any outstanding nonblocking commit worker threads which
 * might be affected by the new state update. This can be done by either cancelling
This can be done by either cancelling 1777 * or flushing the work items, depending upon whether the driver can deal with 1778 * cancelled updates. Note that it is important to ensure that the framebuffer 1779 * cleanup is still done when cancelling. 1780 * 1781 * Asynchronous workers need to have sufficient parallelism to be able to run 1782 * different atomic commits on different CRTCs in parallel. The simplest way to 1783 * achieve this is by running them on the &system_unbound_wq work queue. Note 1784 * that drivers are not required to split up atomic commits and run an 1785 * individual commit in parallel - userspace is supposed to do that if it cares. 1786 * But it might be beneficial to do that for modesets, since those necessarily 1787 * must be done as one global operation, and enabling or disabling a CRTC can 1788 * take a long time. But even that is not required. 1789 * 1790 * 3. The software state is updated synchronously with 1791 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset 1792 * locks means concurrent callers never see inconsistent state. And doing this 1793 * while it's guaranteed that no relevant nonblocking worker runs means that 1794 * nonblocking workers do not need to grab any locks. Actually they must not grab 1795 * locks, for otherwise the work flushing will deadlock. 1796 * 1797 * 4. Schedule a work item to do all subsequent steps, using the split-out 1798 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and 1799 * then cleaning up the framebuffers after the old framebuffer is no longer 1800 * being displayed. 1801 * 1802 * The above scheme is implemented in the atomic helper libraries in 1803 * drm_atomic_helper_commit() using a bunch of helper functions. See 1804 * drm_atomic_helper_setup_commit() for a starting point. 1805 */ 1806 1807 static int stall_checks(struct drm_crtc *crtc, bool nonblock) 1808 { 1809 struct drm_crtc_commit *commit, *stall_commit = NULL; 1810 bool completed = true; 1811 int i; 1812 long ret = 0; 1813 1814 spin_lock(&crtc->commit_lock); 1815 i = 0; 1816 list_for_each_entry(commit, &crtc->commit_list, commit_entry) { 1817 if (i == 0) { 1818 completed = try_wait_for_completion(&commit->flip_done); 1819 /* Userspace is not allowed to get ahead of the previous 1820 * commit with nonblocking ones. */ 1821 if (!completed && nonblock) { 1822 spin_unlock(&crtc->commit_lock); 1823 return -EBUSY; 1824 } 1825 } else if (i == 1) { 1826 stall_commit = drm_crtc_commit_get(commit); 1827 break; 1828 } 1829 1830 i++; 1831 } 1832 spin_unlock(&crtc->commit_lock); 1833 1834 if (!stall_commit) 1835 return 0; 1836 1837 /* We don't want to let commits get ahead of cleanup work too much, 1838 * stalling on 2nd previous commit means triple-buffer won't ever stall. 1839 */ 1840 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, 1841 10*HZ); 1842 if (ret == 0) 1843 DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", 1844 crtc->base.id, crtc->name); 1845 1846 drm_crtc_commit_put(stall_commit); 1847 1848 return ret < 0 ?
ret : 0; 1849 } 1850 1851 static void release_crtc_commit(struct completion *completion) 1852 { 1853 struct drm_crtc_commit *commit = container_of(completion, 1854 typeof(*commit), 1855 flip_done); 1856 1857 drm_crtc_commit_put(commit); 1858 } 1859 1860 static void init_commit(struct drm_crtc_commit *commit, struct drm_crtc *crtc) 1861 { 1862 init_completion(&commit->flip_done); 1863 init_completion(&commit->hw_done); 1864 init_completion(&commit->cleanup_done); 1865 INIT_LIST_HEAD(&commit->commit_entry); 1866 kref_init(&commit->ref); 1867 commit->crtc = crtc; 1868 } 1869 1870 static struct drm_crtc_commit * 1871 crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc) 1872 { 1873 if (crtc) { 1874 struct drm_crtc_state *new_crtc_state; 1875 1876 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 1877 1878 return new_crtc_state->commit; 1879 } 1880 1881 if (!state->fake_commit) { 1882 state->fake_commit = kzalloc(sizeof(*state->fake_commit), GFP_KERNEL); 1883 if (!state->fake_commit) 1884 return NULL; 1885 1886 init_commit(state->fake_commit, NULL); 1887 } 1888 1889 return state->fake_commit; 1890 } 1891 1892 /** 1893 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit 1894 * @state: new modeset state to be committed 1895 * @nonblock: whether nonblocking behavior is requested. 1896 * 1897 * This function prepares @state to be used by the atomic helper's support for 1898 * nonblocking commits. Drivers using the nonblocking commit infrastructure 1899 * should always call this function from their 1900 * &drm_mode_config_funcs.atomic_commit hook. 1901 * 1902 * To be able to use this support drivers need to use a few more helper 1903 * functions. drm_atomic_helper_wait_for_dependencies() must be called before 1904 * actually committing the hardware state, and for nonblocking commits this call 1905 * must be placed in the async worker. See also drm_atomic_helper_swap_state() 1906 * and its stall parameter, for when a driver's commit hooks look at the 1907 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly. 1908 * 1909 * Completion of the hardware commit step must be signalled using 1910 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed 1911 * to read or change any permanent software or hardware modeset state. The only 1912 * exception is state protected by other means than &drm_modeset_lock locks. 1913 * Only the free-standing @state with pointers to the old state structures can 1914 * be inspected, e.g. to clean up old buffers using 1915 * drm_atomic_helper_cleanup_planes(). 1916 * 1917 * At the very end, before cleaning up @state drivers must call 1918 * drm_atomic_helper_commit_cleanup_done(). 1919 * 1920 * This is all implemented in drm_atomic_helper_commit(), giving drivers a 1921 * complete and easy-to-use default implementation of the atomic_commit() hook. 1922 * 1923 * The tracking of asynchronously executed and still pending commits is done 1924 * using the core structure &drm_crtc_commit. 1925 * 1926 * By default there's no need to clean up resources allocated by this function 1927 * explicitly: drm_atomic_state_default_clear() will take care of that 1928 * automatically. 1929 * 1930 * Returns: 1931 * 1932 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, 1933 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
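 *
 * When drm_atomic_helper_commit() is used, the only hook that normally still
 * needs to call into these helpers directly is
 * &drm_mode_config_helper_funcs.atomic_commit_tail. A rough sketch of such a
 * hook (foo_ is a placeholder driver prefix; the exact ordering and flags can
 * differ per driver)::
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state,
 *						DRM_PLANE_COMMIT_ACTIVE_ONLY);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *
 *		drm_atomic_helper_fake_vblank(old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *	}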
1934 */ 1935 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, 1936 bool nonblock) 1937 { 1938 struct drm_crtc *crtc; 1939 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1940 struct drm_connector *conn; 1941 struct drm_connector_state *old_conn_state, *new_conn_state; 1942 struct drm_plane *plane; 1943 struct drm_plane_state *old_plane_state, *new_plane_state; 1944 struct drm_crtc_commit *commit; 1945 int i, ret; 1946 1947 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1948 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 1949 if (!commit) 1950 return -ENOMEM; 1951 1952 init_commit(commit, crtc); 1953 1954 new_crtc_state->commit = commit; 1955 1956 ret = stall_checks(crtc, nonblock); 1957 if (ret) 1958 return ret; 1959 1960 /* Drivers only send out events when at least either current or 1961 * new CRTC state is active. Complete right away if everything 1962 * stays off. */ 1963 if (!old_crtc_state->active && !new_crtc_state->active) { 1964 complete_all(&commit->flip_done); 1965 continue; 1966 } 1967 1968 /* Legacy cursor updates are fully unsynced. */ 1969 if (state->legacy_cursor_update) { 1970 complete_all(&commit->flip_done); 1971 continue; 1972 } 1973 1974 if (!new_crtc_state->event) { 1975 commit->event = kzalloc(sizeof(*commit->event), 1976 GFP_KERNEL); 1977 if (!commit->event) 1978 return -ENOMEM; 1979 1980 new_crtc_state->event = commit->event; 1981 } 1982 1983 new_crtc_state->event->base.completion = &commit->flip_done; 1984 new_crtc_state->event->base.completion_release = release_crtc_commit; 1985 drm_crtc_commit_get(commit); 1986 1987 commit->abort_completion = true; 1988 1989 state->crtcs[i].commit = commit; 1990 drm_crtc_commit_get(commit); 1991 } 1992 1993 for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) { 1994 /* Userspace is not allowed to get ahead of the previous 1995 * commit with nonblocking ones. */ 1996 if (nonblock && old_conn_state->commit && 1997 !try_wait_for_completion(&old_conn_state->commit->flip_done)) 1998 return -EBUSY; 1999 2000 /* Always track connectors explicitly for e.g. link retraining. */ 2001 commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc); 2002 if (!commit) 2003 return -ENOMEM; 2004 2005 new_conn_state->commit = drm_crtc_commit_get(commit); 2006 } 2007 2008 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 2009 /* Userspace is not allowed to get ahead of the previous 2010 * commit with nonblocking ones. */ 2011 if (nonblock && old_plane_state->commit && 2012 !try_wait_for_completion(&old_plane_state->commit->flip_done)) 2013 return -EBUSY; 2014 2015 /* Always track planes explicitly for async pageflip support. */ 2016 commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc); 2017 if (!commit) 2018 return -ENOMEM; 2019 2020 new_plane_state->commit = drm_crtc_commit_get(commit); 2021 } 2022 2023 return 0; 2024 } 2025 EXPORT_SYMBOL(drm_atomic_helper_setup_commit); 2026 2027 /** 2028 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits 2029 * @old_state: atomic state object with old state structures 2030 * 2031 * This function waits for all preceding commits that touch the same CRTC as 2032 * @old_state to both be committed to the hardware (as signalled by 2033 * drm_atomic_helper_commit_hw_done()) and executed by the hardware (as signalled 2034 * by calling drm_crtc_send_vblank_event() on the &drm_crtc_state.event).
2035 * 2036 * This is part of the atomic helper support for nonblocking commits, see 2037 * drm_atomic_helper_setup_commit() for an overview. 2038 */ 2039 void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state) 2040 { 2041 struct drm_crtc *crtc; 2042 struct drm_crtc_state *old_crtc_state; 2043 struct drm_plane *plane; 2044 struct drm_plane_state *old_plane_state; 2045 struct drm_connector *conn; 2046 struct drm_connector_state *old_conn_state; 2047 struct drm_crtc_commit *commit; 2048 int i; 2049 long ret; 2050 2051 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { 2052 commit = old_crtc_state->commit; 2053 2054 if (!commit) 2055 continue; 2056 2057 ret = wait_for_completion_timeout(&commit->hw_done, 2058 10*HZ); 2059 if (ret == 0) 2060 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", 2061 crtc->base.id, crtc->name); 2062 2063 /* Currently no support for overwriting flips, hence 2064 * stall for previous one to execute completely. */ 2065 ret = wait_for_completion_timeout(&commit->flip_done, 2066 10*HZ); 2067 if (ret == 0) 2068 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", 2069 crtc->base.id, crtc->name); 2070 } 2071 2072 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 2073 commit = old_conn_state->commit; 2074 2075 if (!commit) 2076 continue; 2077 2078 ret = wait_for_completion_timeout(&commit->hw_done, 2079 10*HZ); 2080 if (ret == 0) 2081 DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n", 2082 conn->base.id, conn->name); 2083 2084 /* Currently no support for overwriting flips, hence 2085 * stall for previous one to execute completely. */ 2086 ret = wait_for_completion_timeout(&commit->flip_done, 2087 10*HZ); 2088 if (ret == 0) 2089 DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n", 2090 conn->base.id, conn->name); 2091 } 2092 2093 for_each_old_plane_in_state(old_state, plane, old_plane_state, i) { 2094 commit = old_plane_state->commit; 2095 2096 if (!commit) 2097 continue; 2098 2099 ret = wait_for_completion_timeout(&commit->hw_done, 2100 10*HZ); 2101 if (ret == 0) 2102 DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n", 2103 plane->base.id, plane->name); 2104 2105 /* Currently no support for overwriting flips, hence 2106 * stall for previous one to execute completely. */ 2107 ret = wait_for_completion_timeout(&commit->flip_done, 2108 10*HZ); 2109 if (ret == 0) 2110 DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n", 2111 plane->base.id, plane->name); 2112 } 2113 } 2114 EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); 2115 2116 /** 2117 * drm_atomic_helper_fake_vblank - fake VBLANK events if needed 2118 * @old_state: atomic state object with old state structures 2119 * 2120 * This function walks all CRTCs and fakes VBLANK events on those with 2121 * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL. 2122 * The primary use of this function is writeback connectors working in oneshot 2123 * mode and faking VBLANK events. In this case they only fake the VBLANK event 2124 * when a job is queued, and any change to the pipeline that does not touch the 2125 * connector would otherwise lead to timeouts when calling 2126 * drm_atomic_helper_wait_for_vblanks() or 2127 * drm_atomic_helper_wait_for_flip_done(). 2128 * 2129 * This is part of the atomic helper support for nonblocking commits, see 2130 * drm_atomic_helper_setup_commit() for an overview.
2131 */ 2132 void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state) 2133 { 2134 struct drm_crtc_state *new_crtc_state; 2135 struct drm_crtc *crtc; 2136 int i; 2137 2138 for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) { 2139 unsigned long flags; 2140 2141 if (!new_crtc_state->no_vblank) 2142 continue; 2143 2144 spin_lock_irqsave(&old_state->dev->event_lock, flags); 2145 if (new_crtc_state->event) { 2146 drm_crtc_send_vblank_event(crtc, 2147 new_crtc_state->event); 2148 new_crtc_state->event = NULL; 2149 } 2150 spin_unlock_irqrestore(&old_state->dev->event_lock, flags); 2151 } 2152 } 2153 EXPORT_SYMBOL(drm_atomic_helper_fake_vblank); 2154 2155 /** 2156 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step 2157 * @old_state: atomic state object with old state structures 2158 * 2159 * This function is used to signal completion of the hardware commit step. After 2160 * this step the driver is not allowed to read or change any permanent software 2161 * or hardware modeset state. The only exception is state protected by other 2162 * means than &drm_modeset_lock locks. 2163 * 2164 * Drivers should try to postpone any expensive or delayed cleanup work until 2165 * after this function is called. 2166 * 2167 * This is part of the atomic helper support for nonblocking commits, see 2168 * drm_atomic_helper_setup_commit() for an overview. 2169 */ 2170 void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state) 2171 { 2172 struct drm_crtc *crtc; 2173 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2174 struct drm_crtc_commit *commit; 2175 int i; 2176 2177 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { 2178 commit = new_crtc_state->commit; 2179 if (!commit) 2180 continue; 2181 2182 /* 2183 * copy new_crtc_state->commit to old_crtc_state->commit, 2184 * it's unsafe to touch new_crtc_state after hw_done, 2185 * but we still need to do so in cleanup_done(). 2186 */ 2187 if (old_crtc_state->commit) 2188 drm_crtc_commit_put(old_crtc_state->commit); 2189 2190 old_crtc_state->commit = drm_crtc_commit_get(commit); 2191 2192 /* backend must have consumed any event by now */ 2193 WARN_ON(new_crtc_state->event); 2194 complete_all(&commit->hw_done); 2195 } 2196 2197 if (old_state->fake_commit) { 2198 complete_all(&old_state->fake_commit->hw_done); 2199 complete_all(&old_state->fake_commit->flip_done); 2200 } 2201 } 2202 EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); 2203 2204 /** 2205 * drm_atomic_helper_commit_cleanup_done - signal completion of commit 2206 * @old_state: atomic state object with old state structures 2207 * 2208 * This signals completion of the atomic update @old_state, including any 2209 * cleanup work. If used, it must be called right before calling 2210 * drm_atomic_state_put(). 2211 * 2212 * This is part of the atomic helper support for nonblocking commits, see 2213 * drm_atomic_helper_setup_commit() for an overview.
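 *
 * In a hand-rolled commit path the tail end therefore usually looks roughly
 * like the following sketch (with @old_state and dev as in the surrounding
 * helpers, and the hardware-specific steps between hw_done and cleanup
 * elided)::
 *
 *	drm_atomic_helper_commit_hw_done(old_state);
 *
 *	drm_atomic_helper_cleanup_planes(dev, old_state);
 *	drm_atomic_helper_commit_cleanup_done(old_state);
 *	drm_atomic_state_put(old_state);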
2214 */ 2215 void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state) 2216 { 2217 struct drm_crtc *crtc; 2218 struct drm_crtc_state *old_crtc_state; 2219 struct drm_crtc_commit *commit; 2220 int i; 2221 2222 for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { 2223 commit = old_crtc_state->commit; 2224 if (WARN_ON(!commit)) 2225 continue; 2226 2227 complete_all(&commit->cleanup_done); 2228 WARN_ON(!try_wait_for_completion(&commit->hw_done)); 2229 2230 spin_lock(&crtc->commit_lock); 2231 list_del(&commit->commit_entry); 2232 spin_unlock(&crtc->commit_lock); 2233 } 2234 2235 if (old_state->fake_commit) { 2236 complete_all(&old_state->fake_commit->cleanup_done); 2237 WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done)); 2238 } 2239 } 2240 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); 2241 2242 /** 2243 * drm_atomic_helper_prepare_planes - prepare plane resources before commit 2244 * @dev: DRM device 2245 * @state: atomic state object with new state structures 2246 * 2247 * This function prepares plane state, specifically framebuffers, for the new 2248 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure 2249 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on 2250 * any already successfully prepared framebuffer. 2251 * 2252 * Returns: 2253 * 0 on success, negative error code on failure. 2254 */ 2255 int drm_atomic_helper_prepare_planes(struct drm_device *dev, 2256 struct drm_atomic_state *state) 2257 { 2258 struct drm_plane *plane; 2259 struct drm_plane_state *new_plane_state; 2260 int ret, i, j; 2261 2262 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2263 const struct drm_plane_helper_funcs *funcs; 2264 2265 funcs = plane->helper_private; 2266 2267 if (funcs->prepare_fb) { 2268 ret = funcs->prepare_fb(plane, new_plane_state); 2269 if (ret) 2270 goto fail; 2271 } 2272 } 2273 2274 return 0; 2275 2276 fail: 2277 for_each_new_plane_in_state(state, plane, new_plane_state, j) { 2278 const struct drm_plane_helper_funcs *funcs; 2279 2280 if (j >= i) 2281 continue; 2282 2283 funcs = plane->helper_private; 2284 2285 if (funcs->cleanup_fb) 2286 funcs->cleanup_fb(plane, new_plane_state); 2287 } 2288 2289 return ret; 2290 } 2291 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); 2292 2293 static bool plane_crtc_active(const struct drm_plane_state *state) 2294 { 2295 return state->crtc && state->crtc->state->active; 2296 } 2297 2298 /** 2299 * drm_atomic_helper_commit_planes - commit plane state 2300 * @dev: DRM device 2301 * @old_state: atomic state object with old state structures 2302 * @flags: flags for committing plane state 2303 * 2304 * This function commits the new plane state using the plane and atomic helper 2305 * functions for planes and crtcs. It assumes that the atomic state has already 2306 * been pushed into the relevant object state pointers, since this step can no 2307 * longer fail. 2308 * 2309 * It still requires the global state object @old_state to know which planes and 2310 * crtcs need to be updated though. 2311 * 2312 * Note that this function does all plane updates across all CRTCs in one step. 2313 * If the hardware can't support this approach look at 2314 * drm_atomic_helper_commit_planes_on_crtc() instead. 2315 * 2316 * Plane parameters can be updated by applications while the associated CRTC is 2317 * disabled. The DRM/KMS core will store the parameters in the plane state, 2318 * which will be available to the driver when the CRTC is turned on. 
As a result 2319 * most drivers don't need to be immediately notified of plane updates for a 2320 * disabled CRTC. 2321 * 2322 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in 2323 * @flags in order not to receive plane update notifications related to a 2324 * disabled CRTC. This avoids the need to manually ignore plane updates in 2325 * driver code when the driver and/or hardware can't or just don't need to deal 2326 * with updates on disabled CRTCs, for example when supporting runtime PM. 2327 * 2328 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant 2329 * display controllers require to disable a CRTC's planes when the CRTC is 2330 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable 2331 * call for a plane if the CRTC of the old plane state needs a modesetting 2332 * operation. Of course, the drivers need to disable the planes in their CRTC 2333 * disable callbacks since no one else would do that. 2334 * 2335 * The drm_atomic_helper_commit() default implementation doesn't set the 2336 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers. 2337 * This should not be copied blindly by drivers. 2338 */ 2339 void drm_atomic_helper_commit_planes(struct drm_device *dev, 2340 struct drm_atomic_state *old_state, 2341 uint32_t flags) 2342 { 2343 struct drm_crtc *crtc; 2344 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2345 struct drm_plane *plane; 2346 struct drm_plane_state *old_plane_state, *new_plane_state; 2347 int i; 2348 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY; 2349 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET; 2350 2351 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { 2352 const struct drm_crtc_helper_funcs *funcs; 2353 2354 funcs = crtc->helper_private; 2355 2356 if (!funcs || !funcs->atomic_begin) 2357 continue; 2358 2359 if (active_only && !new_crtc_state->active) 2360 continue; 2361 2362 funcs->atomic_begin(crtc, old_crtc_state); 2363 } 2364 2365 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { 2366 const struct drm_plane_helper_funcs *funcs; 2367 bool disabling; 2368 2369 funcs = plane->helper_private; 2370 2371 if (!funcs) 2372 continue; 2373 2374 disabling = drm_atomic_plane_disabling(old_plane_state, 2375 new_plane_state); 2376 2377 if (active_only) { 2378 /* 2379 * Skip planes related to inactive CRTCs. If the plane 2380 * is enabled use the state of the current CRTC. If the 2381 * plane is being disabled use the state of the old 2382 * CRTC to avoid skipping planes being disabled on an 2383 * active CRTC. 2384 */ 2385 if (!disabling && !plane_crtc_active(new_plane_state)) 2386 continue; 2387 if (disabling && !plane_crtc_active(old_plane_state)) 2388 continue; 2389 } 2390 2391 /* 2392 * Special-case disabling the plane if drivers support it. 
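 * With DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET the atomic_disable call is
 * skipped for planes whose old CRTC is undergoing a full modeset; such
 * drivers are expected to turn the planes off from their CRTC disable path
 * instead.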
2393 */ 2394 if (disabling && funcs->atomic_disable) { 2395 struct drm_crtc_state *crtc_state; 2396 2397 crtc_state = old_plane_state->crtc->state; 2398 2399 if (drm_atomic_crtc_needs_modeset(crtc_state) && 2400 no_disable) 2401 continue; 2402 2403 funcs->atomic_disable(plane, old_plane_state); 2404 } else if (new_plane_state->crtc || disabling) { 2405 funcs->atomic_update(plane, old_plane_state); 2406 } 2407 } 2408 2409 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { 2410 const struct drm_crtc_helper_funcs *funcs; 2411 2412 funcs = crtc->helper_private; 2413 2414 if (!funcs || !funcs->atomic_flush) 2415 continue; 2416 2417 if (active_only && !new_crtc_state->active) 2418 continue; 2419 2420 funcs->atomic_flush(crtc, old_crtc_state); 2421 } 2422 } 2423 EXPORT_SYMBOL(drm_atomic_helper_commit_planes); 2424 2425 /** 2426 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc 2427 * @old_crtc_state: atomic state object with the old crtc state 2428 * 2429 * This function commits the new plane state using the plane and atomic helper 2430 * functions for planes on the specific crtc. It assumes that the atomic state 2431 * has already been pushed into the relevant object state pointers, since this 2432 * step can no longer fail. 2433 * 2434 * This function is useful when plane updates should be done crtc-by-crtc 2435 * instead of one global step like drm_atomic_helper_commit_planes() does. 2436 * 2437 * This function can only be safely used when planes are not allowed to move 2438 * between different CRTCs because this function doesn't handle inter-CRTC 2439 * dependencies. Callers need to ensure that either no such dependencies exist, 2440 * or that they are resolved through ordering of the commit calls or through some other means.
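 *
 * A driver's commit code can for example loop over the CRTCs in the commit
 * and wrap each call with whatever per-CRTC resource handling the hardware
 * needs. A rough sketch, with the foo_ helpers being made-up driver
 * functions::
 *
 *	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 *		foo_crtc_enable_clocks(crtc);
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 *		foo_crtc_disable_clocks(crtc);
 *	}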
2441 */ 2442 void 2443 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) 2444 { 2445 const struct drm_crtc_helper_funcs *crtc_funcs; 2446 struct drm_crtc *crtc = old_crtc_state->crtc; 2447 struct drm_atomic_state *old_state = old_crtc_state->state; 2448 struct drm_crtc_state *new_crtc_state = 2449 drm_atomic_get_new_crtc_state(old_state, crtc); 2450 struct drm_plane *plane; 2451 unsigned plane_mask; 2452 2453 plane_mask = old_crtc_state->plane_mask; 2454 plane_mask |= new_crtc_state->plane_mask; 2455 2456 crtc_funcs = crtc->helper_private; 2457 if (crtc_funcs && crtc_funcs->atomic_begin) 2458 crtc_funcs->atomic_begin(crtc, old_crtc_state); 2459 2460 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) { 2461 struct drm_plane_state *old_plane_state = 2462 drm_atomic_get_old_plane_state(old_state, plane); 2463 struct drm_plane_state *new_plane_state = 2464 drm_atomic_get_new_plane_state(old_state, plane); 2465 const struct drm_plane_helper_funcs *plane_funcs; 2466 2467 plane_funcs = plane->helper_private; 2468 2469 if (!old_plane_state || !plane_funcs) 2470 continue; 2471 2472 WARN_ON(new_plane_state->crtc && 2473 new_plane_state->crtc != crtc); 2474 2475 if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) && 2476 plane_funcs->atomic_disable) 2477 plane_funcs->atomic_disable(plane, old_plane_state); 2478 else if (new_plane_state->crtc || 2479 drm_atomic_plane_disabling(old_plane_state, new_plane_state)) 2480 plane_funcs->atomic_update(plane, old_plane_state); 2481 } 2482 2483 if (crtc_funcs && crtc_funcs->atomic_flush) 2484 crtc_funcs->atomic_flush(crtc, old_crtc_state); 2485 } 2486 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); 2487 2488 /** 2489 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes 2490 * @old_crtc_state: atomic state object with the old CRTC state 2491 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks 2492 * 2493 * Disables all planes associated with the given CRTC. This can be 2494 * used for instance in the CRTC helper atomic_disable callback to disable 2495 * all planes. 2496 * 2497 * If the atomic-parameter is set the function calls the CRTC's 2498 * atomic_begin hook before and atomic_flush hook after disabling the 2499 * planes. 2500 * 2501 * It is a bug to call this function without having implemented the 2502 * &drm_plane_helper_funcs.atomic_disable plane hook. 
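 *
 * For instance it can be called from a &drm_crtc_helper_funcs.atomic_disable
 * hook, roughly as in this sketch (foo_* names are made up; whether to pass
 * true or false for @atomic depends on whether the plane hooks rely on the
 * CRTC's atomic_begin/atomic_flush bracketing)::
 *
 *	static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
 *					    struct drm_crtc_state *old_crtc_state)
 *	{
 *		drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 *		foo_crtc_power_off(crtc);
 *	}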
2503 */ 2504 void 2505 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, 2506 bool atomic) 2507 { 2508 struct drm_crtc *crtc = old_crtc_state->crtc; 2509 const struct drm_crtc_helper_funcs *crtc_funcs = 2510 crtc->helper_private; 2511 struct drm_plane *plane; 2512 2513 if (atomic && crtc_funcs && crtc_funcs->atomic_begin) 2514 crtc_funcs->atomic_begin(crtc, NULL); 2515 2516 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { 2517 const struct drm_plane_helper_funcs *plane_funcs = 2518 plane->helper_private; 2519 2520 if (!plane_funcs) 2521 continue; 2522 2523 WARN_ON(!plane_funcs->atomic_disable); 2524 if (plane_funcs->atomic_disable) 2525 plane_funcs->atomic_disable(plane, NULL); 2526 } 2527 2528 if (atomic && crtc_funcs && crtc_funcs->atomic_flush) 2529 crtc_funcs->atomic_flush(crtc, NULL); 2530 } 2531 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc); 2532 2533 /** 2534 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit 2535 * @dev: DRM device 2536 * @old_state: atomic state object with old state structures 2537 * 2538 * This function cleans up plane state, specifically framebuffers, from the old 2539 * configuration. Hence the old configuration must be preserved in @old_state to 2540 * be able to call this function. 2541 * 2542 * This function must also be called on the new state when the atomic update 2543 * fails at any point after calling drm_atomic_helper_prepare_planes(). 2544 */ 2545 void drm_atomic_helper_cleanup_planes(struct drm_device *dev, 2546 struct drm_atomic_state *old_state) 2547 { 2548 struct drm_plane *plane; 2549 struct drm_plane_state *old_plane_state, *new_plane_state; 2550 int i; 2551 2552 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { 2553 const struct drm_plane_helper_funcs *funcs; 2554 struct drm_plane_state *plane_state; 2555 2556 /* 2557 * This might be called before swapping when commit is aborted, 2558 * in which case we have to cleanup the new state. 2559 */ 2560 if (old_plane_state == plane->state) 2561 plane_state = new_plane_state; 2562 else 2563 plane_state = old_plane_state; 2564 2565 funcs = plane->helper_private; 2566 2567 if (funcs->cleanup_fb) 2568 funcs->cleanup_fb(plane, plane_state); 2569 } 2570 } 2571 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); 2572 2573 /** 2574 * drm_atomic_helper_swap_state - store atomic state into current sw state 2575 * @state: atomic state 2576 * @stall: stall for preceding commits 2577 * 2578 * This function stores the atomic state into the current state pointers in all 2579 * driver objects. It should be called after all steps that can fail have been done 2580 * and have succeeded, but before the actual hardware state is committed. 2581 * 2582 * For cleanup and error recovery the current state for all changed objects will 2583 * be swapped into @state. 2584 * 2585 * With that sequence it fits perfectly into the plane prepare/cleanup sequence: 2586 * 2587 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state. 2588 * 2589 * 2. Do any other steps that might fail. 2590 * 2591 * 3. Put the staged state into the current state pointers with this function. 2592 * 2593 * 4. Actually commit the hardware state. 2594 * 2595 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 2596 * contains the old state. Also do any other cleanup required with that state.
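 *
 * In code, and with error handling abbreviated, that sequence can be sketched
 * as follows, where dev is the driver's &drm_device and foo_commit_hw() stands
 * in for the driver's actual hardware programming::
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_atomic_helper_swap_state(state, true);
 *	if (ret) {
 *		drm_atomic_helper_cleanup_planes(dev, state);
 *		return ret;
 *	}
 *
 *	foo_commit_hw(dev, state);
 *
 *	drm_atomic_helper_cleanup_planes(dev, state);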
2597 * 2598 * @stall must be set when nonblocking commits for this driver directly access 2599 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With 2600 * the current atomic helpers this is almost always the case, since the helpers 2601 * don't pass the right state structures to the callbacks. 2602 * 2603 * Returns: 2604 * 2605 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and the 2606 * waiting for the previous commits has been interrupted. 2607 */ 2608 int drm_atomic_helper_swap_state(struct drm_atomic_state *state, 2609 bool stall) 2610 { 2611 int i, ret; 2612 struct drm_connector *connector; 2613 struct drm_connector_state *old_conn_state, *new_conn_state; 2614 struct drm_crtc *crtc; 2615 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2616 struct drm_plane *plane; 2617 struct drm_plane_state *old_plane_state, *new_plane_state; 2618 struct drm_crtc_commit *commit; 2619 struct drm_private_obj *obj; 2620 struct drm_private_state *old_obj_state, *new_obj_state; 2621 2622 if (stall) { 2623 /* 2624 * We have to stall for hw_done here before 2625 * drm_atomic_helper_wait_for_dependencies() because flip 2626 * depth > 1 is not yet supported by all drivers. As long as 2627 * obj->state is directly dereferenced anywhere in the drivers 2628 * atomic_commit_tail function, then it's unsafe to swap state 2629 * before drm_atomic_helper_commit_hw_done() is called. 2630 */ 2631 2632 for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { 2633 commit = old_crtc_state->commit; 2634 2635 if (!commit) 2636 continue; 2637 2638 ret = wait_for_completion_interruptible(&commit->hw_done); 2639 if (ret) 2640 return ret; 2641 } 2642 2643 for_each_old_connector_in_state(state, connector, old_conn_state, i) { 2644 commit = old_conn_state->commit; 2645 2646 if (!commit) 2647 continue; 2648 2649 ret = wait_for_completion_interruptible(&commit->hw_done); 2650 if (ret) 2651 return ret; 2652 } 2653 2654 for_each_old_plane_in_state(state, plane, old_plane_state, i) { 2655 commit = old_plane_state->commit; 2656 2657 if (!commit) 2658 continue; 2659 2660 ret = wait_for_completion_interruptible(&commit->hw_done); 2661 if (ret) 2662 return ret; 2663 } 2664 } 2665 2666 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { 2667 WARN_ON(connector->state != old_conn_state); 2668 2669 old_conn_state->state = state; 2670 new_conn_state->state = NULL; 2671 2672 state->connectors[i].state = old_conn_state; 2673 connector->state = new_conn_state; 2674 } 2675 2676 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2677 WARN_ON(crtc->state != old_crtc_state); 2678 2679 old_crtc_state->state = state; 2680 new_crtc_state->state = NULL; 2681 2682 state->crtcs[i].state = old_crtc_state; 2683 crtc->state = new_crtc_state; 2684 2685 if (new_crtc_state->commit) { 2686 spin_lock(&crtc->commit_lock); 2687 list_add(&new_crtc_state->commit->commit_entry, 2688 &crtc->commit_list); 2689 spin_unlock(&crtc->commit_lock); 2690 2691 new_crtc_state->commit->event = NULL; 2692 } 2693 } 2694 2695 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 2696 WARN_ON(plane->state != old_plane_state); 2697 2698 old_plane_state->state = state; 2699 new_plane_state->state = NULL; 2700 2701 state->planes[i].state = old_plane_state; 2702 plane->state = new_plane_state; 2703 } 2704 2705 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { 2706 WARN_ON(obj->state != old_obj_state); 
2707 2708 old_obj_state->state = state; 2709 new_obj_state->state = NULL; 2710 2711 state->private_objs[i].state = old_obj_state; 2712 obj->state = new_obj_state; 2713 } 2714 2715 return 0; 2716 } 2717 EXPORT_SYMBOL(drm_atomic_helper_swap_state); 2718 2719 /** 2720 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic 2721 * @plane: plane object to update 2722 * @crtc: owning CRTC of the plane 2723 * @fb: framebuffer to flip onto plane 2724 * @crtc_x: x offset of primary plane on crtc 2725 * @crtc_y: y offset of primary plane on crtc 2726 * @crtc_w: width of primary plane rectangle on crtc 2727 * @crtc_h: height of primary plane rectangle on crtc 2728 * @src_x: x offset of @fb for panning 2729 * @src_y: y offset of @fb for panning 2730 * @src_w: width of source rectangle in @fb 2731 * @src_h: height of source rectangle in @fb 2732 * @ctx: lock acquire context 2733 * 2734 * Provides a default plane update handler using the atomic driver interface. 2735 * 2736 * RETURNS: 2737 * Zero on success, error code on failure 2738 */ 2739 int drm_atomic_helper_update_plane(struct drm_plane *plane, 2740 struct drm_crtc *crtc, 2741 struct drm_framebuffer *fb, 2742 int crtc_x, int crtc_y, 2743 unsigned int crtc_w, unsigned int crtc_h, 2744 uint32_t src_x, uint32_t src_y, 2745 uint32_t src_w, uint32_t src_h, 2746 struct drm_modeset_acquire_ctx *ctx) 2747 { 2748 struct drm_atomic_state *state; 2749 struct drm_plane_state *plane_state; 2750 int ret = 0; 2751 2752 state = drm_atomic_state_alloc(plane->dev); 2753 if (!state) 2754 return -ENOMEM; 2755 2756 state->acquire_ctx = ctx; 2757 plane_state = drm_atomic_get_plane_state(state, plane); 2758 if (IS_ERR(plane_state)) { 2759 ret = PTR_ERR(plane_state); 2760 goto fail; 2761 } 2762 2763 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 2764 if (ret != 0) 2765 goto fail; 2766 drm_atomic_set_fb_for_plane(plane_state, fb); 2767 plane_state->crtc_x = crtc_x; 2768 plane_state->crtc_y = crtc_y; 2769 plane_state->crtc_w = crtc_w; 2770 plane_state->crtc_h = crtc_h; 2771 plane_state->src_x = src_x; 2772 plane_state->src_y = src_y; 2773 plane_state->src_w = src_w; 2774 plane_state->src_h = src_h; 2775 2776 if (plane == crtc->cursor) 2777 state->legacy_cursor_update = true; 2778 2779 ret = drm_atomic_commit(state); 2780 fail: 2781 drm_atomic_state_put(state); 2782 return ret; 2783 } 2784 EXPORT_SYMBOL(drm_atomic_helper_update_plane); 2785 2786 /** 2787 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic 2788 * @plane: plane to disable 2789 * @ctx: lock acquire context 2790 * 2791 * Provides a default plane disable handler using the atomic driver interface.
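 *
 * Together with drm_atomic_helper_update_plane() this is meant to be plugged
 * directly into &drm_plane_funcs. A sketch for a plane without driver-private
 * state, with foo_ being a placeholder driver prefix::
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane = drm_atomic_helper_update_plane,
 *		.disable_plane = drm_atomic_helper_disable_plane,
 *		.destroy = drm_plane_cleanup,
 *		.reset = drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 *	};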
2792 * 2793 * RETURNS: 2794 * Zero on success, error code on failure 2795 */ 2796 int drm_atomic_helper_disable_plane(struct drm_plane *plane, 2797 struct drm_modeset_acquire_ctx *ctx) 2798 { 2799 struct drm_atomic_state *state; 2800 struct drm_plane_state *plane_state; 2801 int ret = 0; 2802 2803 state = drm_atomic_state_alloc(plane->dev); 2804 if (!state) 2805 return -ENOMEM; 2806 2807 state->acquire_ctx = ctx; 2808 plane_state = drm_atomic_get_plane_state(state, plane); 2809 if (IS_ERR(plane_state)) { 2810 ret = PTR_ERR(plane_state); 2811 goto fail; 2812 } 2813 2814 if (plane_state->crtc && plane_state->crtc->cursor == plane) 2815 plane_state->state->legacy_cursor_update = true; 2816 2817 ret = __drm_atomic_helper_disable_plane(plane, plane_state); 2818 if (ret != 0) 2819 goto fail; 2820 2821 ret = drm_atomic_commit(state); 2822 fail: 2823 drm_atomic_state_put(state); 2824 return ret; 2825 } 2826 EXPORT_SYMBOL(drm_atomic_helper_disable_plane); 2827 2828 /* just used from fb-helper and atomic-helper: */ 2829 int __drm_atomic_helper_disable_plane(struct drm_plane *plane, 2830 struct drm_plane_state *plane_state) 2831 { 2832 int ret; 2833 2834 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 2835 if (ret != 0) 2836 return ret; 2837 2838 drm_atomic_set_fb_for_plane(plane_state, NULL); 2839 plane_state->crtc_x = 0; 2840 plane_state->crtc_y = 0; 2841 plane_state->crtc_w = 0; 2842 plane_state->crtc_h = 0; 2843 plane_state->src_x = 0; 2844 plane_state->src_y = 0; 2845 plane_state->src_w = 0; 2846 plane_state->src_h = 0; 2847 2848 return 0; 2849 } 2850 2851 static int update_output_state(struct drm_atomic_state *state, 2852 struct drm_mode_set *set) 2853 { 2854 struct drm_device *dev = set->crtc->dev; 2855 struct drm_crtc *crtc; 2856 struct drm_crtc_state *new_crtc_state; 2857 struct drm_connector *connector; 2858 struct drm_connector_state *new_conn_state; 2859 int ret, i; 2860 2861 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 2862 state->acquire_ctx); 2863 if (ret) 2864 return ret; 2865 2866 /* First disable all connectors on the target crtc. */ 2867 ret = drm_atomic_add_affected_connectors(state, set->crtc); 2868 if (ret) 2869 return ret; 2870 2871 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 2872 if (new_conn_state->crtc == set->crtc) { 2873 ret = drm_atomic_set_crtc_for_connector(new_conn_state, 2874 NULL); 2875 if (ret) 2876 return ret; 2877 2878 /* Make sure legacy setCrtc always re-trains */ 2879 new_conn_state->link_status = DRM_LINK_STATUS_GOOD; 2880 } 2881 } 2882 2883 /* Then set all connectors from set->connectors on the target crtc */ 2884 for (i = 0; i < set->num_connectors; i++) { 2885 new_conn_state = drm_atomic_get_connector_state(state, 2886 set->connectors[i]); 2887 if (IS_ERR(new_conn_state)) 2888 return PTR_ERR(new_conn_state); 2889 2890 ret = drm_atomic_set_crtc_for_connector(new_conn_state, 2891 set->crtc); 2892 if (ret) 2893 return ret; 2894 } 2895 2896 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 2897 /* Don't update ->enable for the CRTC in the set_config request, 2898 * since a mismatch would indicate a bug in the upper layers. 2899 * The actual modeset code later on will catch any 2900 * inconsistencies here. 
*/ 2901 if (crtc == set->crtc) 2902 continue; 2903 2904 if (!new_crtc_state->connector_mask) { 2905 ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state, 2906 NULL); 2907 if (ret < 0) 2908 return ret; 2909 2910 new_crtc_state->active = false; 2911 } 2912 } 2913 2914 return 0; 2915 } 2916 2917 /** 2918 * drm_atomic_helper_set_config - set a new config from userspace 2919 * @set: mode set configuration 2920 * @ctx: lock acquisition context 2921 * 2922 * Provides a default crtc set_config handler using the atomic driver interface. 2923 * 2924 * NOTE: For backwards compatibility with old userspace this automatically 2925 * resets the "link-status" property to GOOD, to force any link 2926 * re-training. The SETCRTC ioctl does not define whether an update does 2927 * need a full modeset or just a plane update, hence we're allowed to do 2928 * that. See also drm_connector_set_link_status_property(). 2929 * 2930 * Returns: 2931 * Returns 0 on success, negative errno numbers on failure. 2932 */ 2933 int drm_atomic_helper_set_config(struct drm_mode_set *set, 2934 struct drm_modeset_acquire_ctx *ctx) 2935 { 2936 struct drm_atomic_state *state; 2937 struct drm_crtc *crtc = set->crtc; 2938 int ret = 0; 2939 2940 state = drm_atomic_state_alloc(crtc->dev); 2941 if (!state) 2942 return -ENOMEM; 2943 2944 state->acquire_ctx = ctx; 2945 ret = __drm_atomic_helper_set_config(set, state); 2946 if (ret != 0) 2947 goto fail; 2948 2949 ret = handle_conflicting_encoders(state, true); 2950 if (ret) 2951 goto fail; 2952 2953 ret = drm_atomic_commit(state); 2954 2955 fail: 2956 drm_atomic_state_put(state); 2957 return ret; 2958 } 2959 EXPORT_SYMBOL(drm_atomic_helper_set_config); 2960 2961 /* just used from fb-helper and atomic-helper: */ 2962 int __drm_atomic_helper_set_config(struct drm_mode_set *set, 2963 struct drm_atomic_state *state) 2964 { 2965 struct drm_crtc_state *crtc_state; 2966 struct drm_plane_state *primary_state; 2967 struct drm_crtc *crtc = set->crtc; 2968 int hdisplay, vdisplay; 2969 int ret; 2970 2971 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2972 if (IS_ERR(crtc_state)) 2973 return PTR_ERR(crtc_state); 2974 2975 primary_state = drm_atomic_get_plane_state(state, crtc->primary); 2976 if (IS_ERR(primary_state)) 2977 return PTR_ERR(primary_state); 2978 2979 if (!set->mode) { 2980 WARN_ON(set->fb); 2981 WARN_ON(set->num_connectors); 2982 2983 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); 2984 if (ret != 0) 2985 return ret; 2986 2987 crtc_state->active = false; 2988 2989 ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); 2990 if (ret != 0) 2991 return ret; 2992 2993 drm_atomic_set_fb_for_plane(primary_state, NULL); 2994 2995 goto commit; 2996 } 2997 2998 WARN_ON(!set->fb); 2999 WARN_ON(!set->num_connectors); 3000 3001 ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode); 3002 if (ret != 0) 3003 return ret; 3004 3005 crtc_state->active = true; 3006 3007 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); 3008 if (ret != 0) 3009 return ret; 3010 3011 drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay); 3012 3013 drm_atomic_set_fb_for_plane(primary_state, set->fb); 3014 primary_state->crtc_x = 0; 3015 primary_state->crtc_y = 0; 3016 primary_state->crtc_w = hdisplay; 3017 primary_state->crtc_h = vdisplay; 3018 primary_state->src_x = set->x << 16; 3019 primary_state->src_y = set->y << 16; 3020 if (drm_rotation_90_or_270(primary_state->rotation)) { 3021 primary_state->src_w = vdisplay << 16; 3022 primary_state->src_h = hdisplay << 16; 3023 } else { 3024
primary_state->src_w = hdisplay << 16; 3025 primary_state->src_h = vdisplay << 16; 3026 } 3027 3028 commit: 3029 ret = update_output_state(state, set); 3030 if (ret) 3031 return ret; 3032 3033 return 0; 3034 } 3035 3036 static int __drm_atomic_helper_disable_all(struct drm_device *dev, 3037 struct drm_modeset_acquire_ctx *ctx, 3038 bool clean_old_fbs) 3039 { 3040 struct drm_atomic_state *state; 3041 struct drm_connector_state *conn_state; 3042 struct drm_connector *conn; 3043 struct drm_plane_state *plane_state; 3044 struct drm_plane *plane; 3045 struct drm_crtc_state *crtc_state; 3046 struct drm_crtc *crtc; 3047 int ret, i; 3048 3049 state = drm_atomic_state_alloc(dev); 3050 if (!state) 3051 return -ENOMEM; 3052 3053 state->acquire_ctx = ctx; 3054 3055 drm_for_each_crtc(crtc, dev) { 3056 crtc_state = drm_atomic_get_crtc_state(state, crtc); 3057 if (IS_ERR(crtc_state)) { 3058 ret = PTR_ERR(crtc_state); 3059 goto free; 3060 } 3061 3062 crtc_state->active = false; 3063 3064 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); 3065 if (ret < 0) 3066 goto free; 3067 3068 ret = drm_atomic_add_affected_planes(state, crtc); 3069 if (ret < 0) 3070 goto free; 3071 3072 ret = drm_atomic_add_affected_connectors(state, crtc); 3073 if (ret < 0) 3074 goto free; 3075 } 3076 3077 for_each_new_connector_in_state(state, conn, conn_state, i) { 3078 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); 3079 if (ret < 0) 3080 goto free; 3081 } 3082 3083 for_each_new_plane_in_state(state, plane, plane_state, i) { 3084 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 3085 if (ret < 0) 3086 goto free; 3087 3088 drm_atomic_set_fb_for_plane(plane_state, NULL); 3089 } 3090 3091 ret = drm_atomic_commit(state); 3092 free: 3093 drm_atomic_state_put(state); 3094 return ret; 3095 } 3096 3097 /** 3098 * drm_atomic_helper_disable_all - disable all currently active outputs 3099 * @dev: DRM device 3100 * @ctx: lock acquisition context 3101 * 3102 * Loops through all connectors, finding those that aren't turned off and then 3103 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC 3104 * that they are connected to. 3105 * 3106 * This is used for example in suspend/resume to disable all currently active 3107 * functions when suspending. If you just want to shut down everything at e.g. 3108 * driver unload, look at drm_atomic_helper_shutdown(). 3109 * 3110 * Note that if callers haven't already acquired all modeset locks this might 3111 * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). 3112 * 3113 * Returns: 3114 * 0 on success or a negative error code on failure. 3115 * 3116 * See also: 3117 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and 3118 * drm_atomic_helper_shutdown(). 3119 */ 3120 int drm_atomic_helper_disable_all(struct drm_device *dev, 3121 struct drm_modeset_acquire_ctx *ctx) 3122 { 3123 return __drm_atomic_helper_disable_all(dev, ctx, false); 3124 } 3125 EXPORT_SYMBOL(drm_atomic_helper_disable_all); 3126 3127 /** 3128 * drm_atomic_helper_shutdown - shutdown all CRTC 3129 * @dev: DRM device 3130 * 3131 * This shuts down all CRTC, which is useful for driver unloading. Shutdown on 3132 * suspend should instead be handled with drm_atomic_helper_suspend(), since 3133 * that also takes a snapshot of the modeset state to be restored on resume. 3134 * 3135 * This is just a convenience wrapper around drm_atomic_helper_disable_all(), 3136 * and it is the atomic version of drm_crtc_force_disable_all(). 
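 *
 * A typical call site is the tail end of the driver's unbind/remove hook, for
 * instance like the following sketch for a platform driver (the foo_ names
 * are illustrative only)::
 *
 *	static int foo_platform_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *		drm_dev_put(drm);
 *
 *		return 0;
 *	}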
3137 */ 3138 void drm_atomic_helper_shutdown(struct drm_device *dev) 3139 { 3140 struct drm_modeset_acquire_ctx ctx; 3141 int ret; 3142 3143 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); 3144 3145 ret = __drm_atomic_helper_disable_all(dev, &ctx, true); 3146 if (ret) 3147 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); 3148 3149 DRM_MODESET_LOCK_ALL_END(ctx, ret); 3150 } 3151 EXPORT_SYMBOL(drm_atomic_helper_shutdown); 3152 3153 /** 3154 * drm_atomic_helper_duplicate_state - duplicate an atomic state object 3155 * @dev: DRM device 3156 * @ctx: lock acquisition context 3157 * 3158 * Makes a copy of the current atomic state by looping over all objects and 3159 * duplicating their respective states. This is used for example by suspend/ 3160 * resume support code to save the state prior to suspend such that it can 3161 * be restored upon resume. 3162 * 3163 * Note that this treats atomic state as persistent between save and restore. 3164 * Drivers must make sure that this is possible and won't result in confusion 3165 * or erroneous behaviour. 3166 * 3167 * Note that if callers haven't already acquired all modeset locks this might 3168 * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). 3169 * 3170 * Returns: 3171 * A pointer to the copy of the atomic state object on success or an 3172 * ERR_PTR()-encoded error code on failure. 3173 * 3174 * See also: 3175 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() 3176 */ 3177 struct drm_atomic_state * 3178 drm_atomic_helper_duplicate_state(struct drm_device *dev, 3179 struct drm_modeset_acquire_ctx *ctx) 3180 { 3181 struct drm_atomic_state *state; 3182 struct drm_connector *conn; 3183 struct drm_connector_list_iter conn_iter; 3184 struct drm_plane *plane; 3185 struct drm_crtc *crtc; 3186 int err = 0; 3187 3188 state = drm_atomic_state_alloc(dev); 3189 if (!state) 3190 return ERR_PTR(-ENOMEM); 3191 3192 state->acquire_ctx = ctx; 3193 3194 drm_for_each_crtc(crtc, dev) { 3195 struct drm_crtc_state *crtc_state; 3196 3197 crtc_state = drm_atomic_get_crtc_state(state, crtc); 3198 if (IS_ERR(crtc_state)) { 3199 err = PTR_ERR(crtc_state); 3200 goto free; 3201 } 3202 } 3203 3204 drm_for_each_plane(plane, dev) { 3205 struct drm_plane_state *plane_state; 3206 3207 plane_state = drm_atomic_get_plane_state(state, plane); 3208 if (IS_ERR(plane_state)) { 3209 err = PTR_ERR(plane_state); 3210 goto free; 3211 } 3212 } 3213 3214 drm_connector_list_iter_begin(dev, &conn_iter); 3215 drm_for_each_connector_iter(conn, &conn_iter) { 3216 struct drm_connector_state *conn_state; 3217 3218 conn_state = drm_atomic_get_connector_state(state, conn); 3219 if (IS_ERR(conn_state)) { 3220 err = PTR_ERR(conn_state); 3221 drm_connector_list_iter_end(&conn_iter); 3222 goto free; 3223 } 3224 } 3225 drm_connector_list_iter_end(&conn_iter); 3226 3227 /* clear the acquire context so that it isn't accidentally reused */ 3228 state->acquire_ctx = NULL; 3229 3230 free: 3231 if (err < 0) { 3232 drm_atomic_state_put(state); 3233 state = ERR_PTR(err); 3234 } 3235 3236 return state; 3237 } 3238 EXPORT_SYMBOL(drm_atomic_helper_duplicate_state); 3239 3240 /** 3241 * drm_atomic_helper_suspend - subsystem-level suspend helper 3242 * @dev: DRM device 3243 * 3244 * Duplicates the current atomic state, disables all active outputs and then 3245 * returns a pointer to the original atomic state to the caller. 
Drivers can 3246 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to 3247 * restore the output configuration that was active at the time the system 3248 * entered suspend. 3249 * 3250 * Note that it is potentially unsafe to use this. The atomic state object 3251 * returned by this function is assumed to be persistent. Drivers must ensure 3252 * that this holds true. Before calling this function, drivers must make sure 3253 * to suspend fbdev emulation so that nothing can be using the device. 3254 * 3255 * Returns: 3256 * A pointer to a copy of the state before suspend on success or an ERR_PTR()- 3257 * encoded error code on failure. Drivers should store the returned atomic 3258 * state object and pass it to the drm_atomic_helper_resume() helper upon 3259 * resume. 3260 * 3261 * See also: 3262 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(), 3263 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state() 3264 */ 3265 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) 3266 { 3267 struct drm_modeset_acquire_ctx ctx; 3268 struct drm_atomic_state *state; 3269 int err; 3270 3271 /* This can never be returned, but it makes the compiler happy */ 3272 state = ERR_PTR(-EINVAL); 3273 3274 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); 3275 3276 state = drm_atomic_helper_duplicate_state(dev, &ctx); 3277 if (IS_ERR(state)) 3278 goto unlock; 3279 3280 err = drm_atomic_helper_disable_all(dev, &ctx); 3281 if (err < 0) { 3282 drm_atomic_state_put(state); 3283 state = ERR_PTR(err); 3284 goto unlock; 3285 } 3286 3287 unlock: 3288 DRM_MODESET_LOCK_ALL_END(ctx, err); 3289 if (err) 3290 return ERR_PTR(err); 3291 3292 return state; 3293 } 3294 EXPORT_SYMBOL(drm_atomic_helper_suspend); 3295 3296 /** 3297 * drm_atomic_helper_commit_duplicated_state - commit duplicated state 3298 * @state: duplicated atomic state to commit 3299 * @ctx: pointer to acquire_ctx to use for commit. 3300 * 3301 * The state returned by drm_atomic_helper_duplicate_state() and 3302 * drm_atomic_helper_suspend() is partially invalid, and needs to 3303 * be fixed up before commit. 3304 * 3305 * Returns: 3306 * 0 on success or a negative error code on failure. 
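 *
 * drm_atomic_helper_resume() performs this fixup internally. A driver-level
 * suspend/resume pair built on top of these helpers can be sketched as
 * follows, where struct foo_device and the foo_ callbacks are purely
 * illustrative and the &drm_device is assumed to be embedded as foo->drm::
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		foo->suspend_state = drm_atomic_helper_suspend(&foo->drm);
 *		return PTR_ERR_OR_ZERO(foo->suspend_state);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		return drm_atomic_helper_resume(&foo->drm, foo->suspend_state);
 *	}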
3307 * 3308 * See also: 3309 * drm_atomic_helper_suspend() 3310 */ 3311 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, 3312 struct drm_modeset_acquire_ctx *ctx) 3313 { 3314 int i, ret; 3315 struct drm_plane *plane; 3316 struct drm_plane_state *new_plane_state; 3317 struct drm_connector *connector; 3318 struct drm_connector_state *new_conn_state; 3319 struct drm_crtc *crtc; 3320 struct drm_crtc_state *new_crtc_state; 3321 3322 state->acquire_ctx = ctx; 3323 3324 for_each_new_plane_in_state(state, plane, new_plane_state, i) 3325 state->planes[i].old_state = plane->state; 3326 3327 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) 3328 state->crtcs[i].old_state = crtc->state; 3329 3330 for_each_new_connector_in_state(state, connector, new_conn_state, i) 3331 state->connectors[i].old_state = connector->state; 3332 3333 ret = drm_atomic_commit(state); 3334 3335 state->acquire_ctx = NULL; 3336 3337 return ret; 3338 } 3339 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); 3340 3341 /** 3342 * drm_atomic_helper_resume - subsystem-level resume helper 3343 * @dev: DRM device 3344 * @state: atomic state to resume to 3345 * 3346 * Calls drm_mode_config_reset() to synchronize hardware and software states, 3347 * grabs all modeset locks and commits the atomic state object. This can be 3348 * used in conjunction with the drm_atomic_helper_suspend() helper to 3349 * implement suspend/resume for drivers that support atomic mode-setting. 3350 * 3351 * Returns: 3352 * 0 on success or a negative error code on failure. 3353 * 3354 * See also: 3355 * drm_atomic_helper_suspend() 3356 */ 3357 int drm_atomic_helper_resume(struct drm_device *dev, 3358 struct drm_atomic_state *state) 3359 { 3360 struct drm_modeset_acquire_ctx ctx; 3361 int err; 3362 3363 drm_mode_config_reset(dev); 3364 3365 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); 3366 3367 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 3368 3369 DRM_MODESET_LOCK_ALL_END(ctx, err); 3370 drm_atomic_state_put(state); 3371 3372 return err; 3373 } 3374 EXPORT_SYMBOL(drm_atomic_helper_resume); 3375 3376 static int page_flip_common(struct drm_atomic_state *state, 3377 struct drm_crtc *crtc, 3378 struct drm_framebuffer *fb, 3379 struct drm_pending_vblank_event *event, 3380 uint32_t flags) 3381 { 3382 struct drm_plane *plane = crtc->primary; 3383 struct drm_plane_state *plane_state; 3384 struct drm_crtc_state *crtc_state; 3385 int ret = 0; 3386 3387 crtc_state = drm_atomic_get_crtc_state(state, crtc); 3388 if (IS_ERR(crtc_state)) 3389 return PTR_ERR(crtc_state); 3390 3391 crtc_state->event = event; 3392 crtc_state->pageflip_flags = flags; 3393 3394 plane_state = drm_atomic_get_plane_state(state, plane); 3395 if (IS_ERR(plane_state)) 3396 return PTR_ERR(plane_state); 3397 3398 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 3399 if (ret != 0) 3400 return ret; 3401 drm_atomic_set_fb_for_plane(plane_state, fb); 3402 3403 /* Make sure we don't accidentally do a full modeset. 
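 * Legacy page flips may only replace the primary plane's framebuffer, so
 * clear allow_modeset and let the atomic check reject anything that would
 * need a full modeset.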
*/ 3404 state->allow_modeset = false; 3405 if (!crtc_state->active) { 3406 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n", 3407 crtc->base.id, crtc->name); 3408 return -EINVAL; 3409 } 3410 3411 return ret; 3412 } 3413 3414 /** 3415 * drm_atomic_helper_page_flip - execute a legacy page flip 3416 * @crtc: DRM crtc 3417 * @fb: DRM framebuffer 3418 * @event: optional DRM event to signal upon completion 3419 * @flags: flip flags for non-vblank sync'ed updates 3420 * @ctx: lock acquisition context 3421 * 3422 * Provides a default &drm_crtc_funcs.page_flip implementation 3423 * using the atomic driver interface. 3424 * 3425 * Returns: 3426 * Returns 0 on success, negative errno numbers on failure. 3427 * 3428 * See also: 3429 * drm_atomic_helper_page_flip_target() 3430 */ 3431 int drm_atomic_helper_page_flip(struct drm_crtc *crtc, 3432 struct drm_framebuffer *fb, 3433 struct drm_pending_vblank_event *event, 3434 uint32_t flags, 3435 struct drm_modeset_acquire_ctx *ctx) 3436 { 3437 struct drm_plane *plane = crtc->primary; 3438 struct drm_atomic_state *state; 3439 int ret = 0; 3440 3441 state = drm_atomic_state_alloc(plane->dev); 3442 if (!state) 3443 return -ENOMEM; 3444 3445 state->acquire_ctx = ctx; 3446 3447 ret = page_flip_common(state, crtc, fb, event, flags); 3448 if (ret != 0) 3449 goto fail; 3450 3451 ret = drm_atomic_nonblocking_commit(state); 3452 fail: 3453 drm_atomic_state_put(state); 3454 return ret; 3455 } 3456 EXPORT_SYMBOL(drm_atomic_helper_page_flip); 3457 3458 /** 3459 * drm_atomic_helper_page_flip_target - do page flip on target vblank period. 3460 * @crtc: DRM crtc 3461 * @fb: DRM framebuffer 3462 * @event: optional DRM event to signal upon completion 3463 * @flags: flip flags for non-vblank sync'ed updates 3464 * @target: specifying the target vblank period when the flip to take effect 3465 * @ctx: lock acquisition context 3466 * 3467 * Provides a default &drm_crtc_funcs.page_flip_target implementation. 3468 * Similar to drm_atomic_helper_page_flip() with extra parameter to specify 3469 * target vblank period to flip. 3470 * 3471 * Returns: 3472 * Returns 0 on success, negative errno numbers on failure. 
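 *
 * Like drm_atomic_helper_set_config() and the other legacy entry points in
 * this file, the flip helpers are meant to be used directly in
 * &drm_crtc_funcs. A sketch of such a function table, with foo_ as an
 * illustrative driver prefix and the usual atomic state helpers alongside::
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config = drm_atomic_helper_set_config,
 *		.page_flip = drm_atomic_helper_page_flip,
 *		.destroy = drm_crtc_cleanup,
 *		.reset = drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 *	};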
3473 */ 3474 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc, 3475 struct drm_framebuffer *fb, 3476 struct drm_pending_vblank_event *event, 3477 uint32_t flags, 3478 uint32_t target, 3479 struct drm_modeset_acquire_ctx *ctx) 3480 { 3481 struct drm_plane *plane = crtc->primary; 3482 struct drm_atomic_state *state; 3483 struct drm_crtc_state *crtc_state; 3484 int ret = 0; 3485 3486 state = drm_atomic_state_alloc(plane->dev); 3487 if (!state) 3488 return -ENOMEM; 3489 3490 state->acquire_ctx = ctx; 3491 3492 ret = page_flip_common(state, crtc, fb, event, flags); 3493 if (ret != 0) 3494 goto fail; 3495 3496 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 3497 if (WARN_ON(!crtc_state)) { 3498 ret = -EINVAL; 3499 goto fail; 3500 } 3501 crtc_state->target_vblank = target; 3502 3503 ret = drm_atomic_nonblocking_commit(state); 3504 fail: 3505 drm_atomic_state_put(state); 3506 return ret; 3507 } 3508 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target); 3509 3510 /** 3511 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table 3512 * @crtc: CRTC object 3513 * @red: red correction table 3514 * @green: green correction table 3515 * @blue: blue correction table 3516 * @size: size of the tables 3517 * @ctx: lock acquire context 3518 * 3519 * Implements support for the legacy gamma correction table for drivers 3520 * that support color management through the DEGAMMA_LUT/GAMMA_LUT 3521 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for 3522 * how the atomic color management and gamma tables work. 3523 */ 3524 int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 3525 u16 *red, u16 *green, u16 *blue, 3526 uint32_t size, 3527 struct drm_modeset_acquire_ctx *ctx) 3528 { 3529 struct drm_device *dev = crtc->dev; 3530 struct drm_atomic_state *state; 3531 struct drm_crtc_state *crtc_state; 3532 struct drm_property_blob *blob = NULL; 3533 struct drm_color_lut *blob_data; 3534 int i, ret = 0; 3535 bool replaced; 3536 3537 state = drm_atomic_state_alloc(crtc->dev); 3538 if (!state) 3539 return -ENOMEM; 3540 3541 blob = drm_property_create_blob(dev, 3542 sizeof(struct drm_color_lut) * size, 3543 NULL); 3544 if (IS_ERR(blob)) { 3545 ret = PTR_ERR(blob); 3546 blob = NULL; 3547 goto fail; 3548 } 3549 3550 /* Prepare GAMMA_LUT with the legacy values. */ 3551 blob_data = blob->data; 3552 for (i = 0; i < size; i++) { 3553 blob_data[i].red = red[i]; 3554 blob_data[i].green = green[i]; 3555 blob_data[i].blue = blue[i]; 3556 } 3557 3558 state->acquire_ctx = ctx; 3559 crtc_state = drm_atomic_get_crtc_state(state, crtc); 3560 if (IS_ERR(crtc_state)) { 3561 ret = PTR_ERR(crtc_state); 3562 goto fail; 3563 } 3564 3565 /* Reset DEGAMMA_LUT and CTM properties. */ 3566 replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); 3567 replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); 3568 replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); 3569 crtc_state->color_mgmt_changed |= replaced; 3570 3571 ret = drm_atomic_commit(state); 3572 3573 fail: 3574 drm_atomic_state_put(state); 3575 drm_property_blob_put(blob); 3576 return ret; 3577 } 3578 EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set); 3579