/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <linux/dma-fence.h>

#include "drm_crtc_helper_internal.h"
#include "drm_crtc_internal.h"

/**
 * DOC: overview
 *
 * This helper library provides implementations of check and commit functions on
 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
 * also provides convenience implementations for the atomic state handling
 * callbacks for drivers which don't need to subclass the drm core structures to
 * add their own additional internal state.
 *
 * This library also provides default implementations for the check callback in
 * drm_atomic_helper_check() and for the commit callback with
 * drm_atomic_helper_commit(). But the individual stages and callbacks are
 * exposed to allow drivers to mix and match and e.g. use the plane helpers only
 * together with a driver private modeset implementation.
 *
 * This library also provides implementations for all the legacy driver
 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config(),
 * drm_atomic_helper_disable_plane() and the
 * various functions to implement set_property callbacks. New drivers must not
 * implement these functions themselves but must use the provided helpers.
 *
 * The atomic helper uses the same function table structures as all other
 * modesetting helpers. See the documentation for &struct drm_crtc_helper_funcs,
 * &struct drm_encoder_helper_funcs and &struct drm_connector_helper_funcs. It
 * also shares the &struct drm_plane_helper_funcs function table with the plane
 * helpers.
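 *
 * For illustration only (the foo_ prefix is a made-up example name, and the
 * driver specific &drm_mode_config_funcs.fb_create hook is left out), a driver
 * relying entirely on these helpers would typically wire up its
 * &struct drm_mode_config_funcs roughly like this:
 *
 * .. code-block:: c
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};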
 */

static void
drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
				struct drm_plane_state *old_plane_state,
				struct drm_plane_state *plane_state,
				struct drm_plane *plane)
{
	struct drm_crtc_state *crtc_state;

	if (old_plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   old_plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		crtc_state->planes_changed = true;
	}
}

static int handle_conflicting_encoders(struct drm_atomic_state *state,
				       bool disable_conflicting_encoders)
{
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct drm_encoder *encoder;
	unsigned encoder_mask = 0;
	int i, ret = 0;

	/*
	 * First loop, find all newly assigned encoders from the connectors
	 * part of the state. If the same encoder is assigned to multiple
	 * connectors bail out.
	 */
	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;
		struct drm_encoder *new_encoder;

		if (!new_conn_state->crtc)
			continue;

		if (funcs->atomic_best_encoder)
			new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
		else if (funcs->best_encoder)
			new_encoder = funcs->best_encoder(connector);
		else
			new_encoder = drm_atomic_helper_best_encoder(connector);

		if (new_encoder) {
			if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
						 new_encoder->base.id, new_encoder->name,
						 connector->base.id, connector->name);

				return -EINVAL;
			}

			encoder_mask |= 1 << drm_encoder_index(new_encoder);
		}
	}

	if (!encoder_mask)
		return 0;

	/*
	 * Second loop, iterate over all connectors not part of the state.
	 *
	 * If a conflicting encoder is found and disable_conflicting_encoders
	 * is not set, an error is returned. Userspace can provide a solution
	 * through the atomic ioctl.
	 *
	 * If the flag is set conflicting connectors are removed from the crtc
	 * and the crtc is disabled if no encoder is left. This preserves
	 * compatibility with the legacy set_config behavior.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_crtc_state *crtc_state;

		if (drm_atomic_get_new_connector_state(state, connector))
			continue;

		encoder = connector->state->best_encoder;
		if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
			continue;

		if (!disable_conflicting_encoders) {
			DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
					 encoder->base.id, encoder->name,
					 connector->state->crtc->base.id,
					 connector->state->crtc->name,
					 connector->base.id, connector->name);
			ret = -EINVAL;
			goto out;
		}

		new_conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(new_conn_state)) {
			ret = PTR_ERR(new_conn_state);
			goto out;
		}

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
				 encoder->base.id, encoder->name,
				 new_conn_state->crtc->base.id, new_conn_state->crtc->name,
				 connector->base.id, connector->name);

		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		ret = drm_atomic_set_crtc_for_connector(new_conn_state, NULL);
		if (ret)
			goto out;

		if (!crtc_state->connector_mask) {
			ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
								NULL);
			if (ret < 0)
				goto out;

			crtc_state->active = false;
		}
	}
out:
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void
set_best_encoder(struct drm_atomic_state *state,
		 struct drm_connector_state *conn_state,
		 struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;

	if (conn_state->best_encoder) {
		/* Unset the encoder_mask in the old crtc state. */
		crtc = conn_state->connector->state->crtc;

		/* A NULL crtc is an error here because we should have
		 * duplicated a NULL best_encoder when crtc was NULL.
		 * As an exception restoring duplicated atomic state
		 * during resume is allowed, so don't warn when
		 * best_encoder is equal to the encoder we intend to set.
		 */
		WARN_ON(!crtc && encoder != conn_state->best_encoder);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask &=
				~(1 << drm_encoder_index(conn_state->best_encoder));
		}
	}

	if (encoder) {
		crtc = conn_state->crtc;
		WARN_ON(!crtc);
		if (crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, crtc);

			crtc_state->encoder_mask |=
				1 << drm_encoder_index(encoder);
		}
	}

	conn_state->best_encoder = encoder;
}

static void
steal_encoder(struct drm_atomic_state *state,
	      struct drm_encoder *encoder)
{
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		struct drm_crtc *encoder_crtc;

		if (new_connector_state->best_encoder != encoder)
			continue;

		encoder_crtc = old_connector_state->crtc;

		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
				 encoder->base.id, encoder->name,
				 encoder_crtc->base.id, encoder_crtc->name);

		set_best_encoder(state, new_connector_state, NULL);

		crtc_state = drm_atomic_get_new_crtc_state(state, encoder_crtc);
		crtc_state->connectors_changed = true;

		return;
	}
}

static int
update_connector_routing(struct drm_atomic_state *state,
			 struct drm_connector *connector,
			 struct drm_connector_state *old_connector_state,
			 struct drm_connector_state *new_connector_state)
{
	const struct drm_connector_helper_funcs *funcs;
	struct drm_encoder *new_encoder;
	struct drm_crtc_state *crtc_state;

	DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
			 connector->base.id,
			 connector->name);

	if (old_connector_state->crtc != new_connector_state->crtc) {
		if (old_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, old_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}

		if (new_connector_state->crtc) {
			crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
			crtc_state->connectors_changed = true;
		}
	}

	if (!new_connector_state->crtc) {
		DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);

		set_best_encoder(state, new_connector_state, NULL);

		return 0;
	}

	funcs = connector->helper_private;

	if (funcs->atomic_best_encoder)
		new_encoder = funcs->atomic_best_encoder(connector,
							 new_connector_state);
	else if (funcs->best_encoder)
		new_encoder = funcs->best_encoder(connector);
	else
		new_encoder = drm_atomic_helper_best_encoder(connector);

	if (!new_encoder) {
		DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
				 connector->base.id,
				 connector->name);
		return -EINVAL;
	}

	if (!drm_encoder_crtc_ok(new_encoder, new_connector_state->crtc)) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d:%s]\n",
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);
		return -EINVAL;
	}

	if (new_encoder == new_connector_state->best_encoder) {
		set_best_encoder(state, new_connector_state, new_encoder);

		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
				 connector->base.id,
				 connector->name,
				 new_encoder->base.id,
				 new_encoder->name,
				 new_connector_state->crtc->base.id,
				 new_connector_state->crtc->name);

		return 0;
	}

	steal_encoder(state, new_encoder);

	set_best_encoder(state, new_connector_state, new_encoder);

	crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
	crtc_state->connectors_changed = true;

	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
			 connector->base.id,
			 connector->name,
			 new_encoder->base.id,
			 new_encoder->name,
			 new_connector_state->crtc->base.id,
			 new_connector_state->crtc->name);

	return 0;
}

static int
mode_fixup(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
	int ret;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		drm_mode_copy(&new_crtc_state->adjusted_mode, &new_crtc_state->mode);
	}

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		WARN_ON(!!new_conn_state->best_encoder != !!new_conn_state->crtc);

		if (!new_conn_state->crtc || !new_conn_state->best_encoder)
			continue;

		new_crtc_state =
			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call ->mode_fixup twice.
		 */
		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		ret = drm_bridge_mode_fixup(encoder->bridge, &new_crtc_state->mode,
					    &new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
			return -EINVAL;
		}

		if (funcs && funcs->atomic_check) {
			ret = funcs->atomic_check(encoder, new_crtc_state,
						  new_conn_state);
			if (ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
						 encoder->base.id, encoder->name);
				return ret;
			}
		} else if (funcs && funcs->mode_fixup) {
			ret = funcs->mode_fixup(encoder, &new_crtc_state->mode,
						&new_crtc_state->adjusted_mode);
			if (!ret) {
				DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
						 encoder->base.id, encoder->name);
				return -EINVAL;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->enable)
			continue;

		if (!new_crtc_state->mode_changed &&
		    !new_crtc_state->connectors_changed)
			continue;

		funcs = crtc->helper_private;
		if (!funcs->mode_fixup)
			continue;

		ret = funcs->mode_fixup(crtc, &new_crtc_state->mode,
					&new_crtc_state->adjusted_mode);
		if (!ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] fixup failed\n",
					 crtc->base.id, crtc->name);
			return -EINVAL;
		}
	}

	return 0;
}

static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
					    struct drm_encoder *encoder,
					    struct drm_crtc *crtc,
					    struct drm_display_mode *mode)
{
	enum drm_mode_status ret;

	ret = drm_encoder_mode_valid(encoder, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] mode_valid() failed\n",
				 encoder->base.id, encoder->name);
		return ret;
	}

	ret = drm_bridge_mode_valid(encoder->bridge, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[BRIDGE] mode_valid() failed\n");
		return ret;
	}

	ret = drm_crtc_mode_valid(crtc, mode);
	if (ret != MODE_OK) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode_valid() failed\n",
				 crtc->base.id, crtc->name);
		return ret;
	}

	return ret;
}

static int
mode_valid(struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	int i;

	for_each_new_connector_in_state(state, connector, conn_state, i) {
		struct drm_encoder *encoder = conn_state->best_encoder;
		struct drm_crtc *crtc = conn_state->crtc;
		struct drm_crtc_state *crtc_state;
		enum drm_mode_status mode_status;
		struct drm_display_mode *mode;

		if (!crtc || !encoder)
			continue;

		crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!crtc_state)
			continue;
		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
			continue;

		mode = &crtc_state->mode;

		mode_status = mode_valid_path(connector, encoder, crtc, mode);
		if (mode_status != MODE_OK)
			return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_helper_check_modeset - validate state object for modeset changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the crtc and connector related computations for an atomic
 * update and adds any additional connectors needed for full modesets. It calls
 * the various per-object callbacks in the following order:
 *
 * 1. &drm_connector_helper_funcs.atomic_best_encoder for determining the new encoder.
 * 2. &drm_connector_helper_funcs.atomic_check to validate the connector state.
 * 3. If it's determined a modeset is needed then all connectors on the affected
 *    crtc are added and &drm_connector_helper_funcs.atomic_check is run on them.
 * 4. &drm_encoder_helper_funcs.mode_valid, &drm_bridge_funcs.mode_valid and
 *    &drm_crtc_helper_funcs.mode_valid are called on the affected components.
 * 5. &drm_bridge_funcs.mode_fixup is called on all encoder bridges.
 * 6. &drm_encoder_helper_funcs.atomic_check is called to validate any encoder state.
 *    This function is only called when the encoder will be part of a configured crtc,
 *    it must not be used for implementing connector property validation.
 *    If this function is NULL, &drm_encoder_helper_funcs.mode_fixup is called
 *    instead.
 * 7. &drm_crtc_helper_funcs.mode_fixup is called last, to fix up the mode with crtc constraints.
 *
 * &drm_crtc_state.mode_changed is set when the input mode is changed.
 * &drm_crtc_state.connectors_changed is set when a connector is added or
 * removed from the crtc. &drm_crtc_state.active_changed is set when
 * &drm_crtc_state.active changes, which is used for DPMS.
 * See also: drm_atomic_crtc_needs_modeset()
 *
 * IMPORTANT:
 *
 * Drivers which set &drm_crtc_state.mode_changed (e.g. in their
 * &drm_plane_helper_funcs.atomic_check hooks if a plane update can't be done
 * without a full modeset) _must_ call this function again after that
 * change. It is permitted to call this function multiple times for the same
 * update, e.g.
 * when the &drm_crtc_helper_funcs.atomic_check functions depend
 * upon the adjusted dotclock for fifo space allocation and watermark
 * computation.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_connector_state, *new_connector_state;
	int i, ret;
	unsigned connectors_mask = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		bool has_connectors =
			!!new_crtc_state->connector_mask;

		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

		if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
					 crtc->base.id, crtc->name);
			new_crtc_state->mode_changed = true;
		}

		if (old_crtc_state->enable != new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enable changed\n",
					 crtc->base.id, crtc->name);

			/*
			 * For clarity this assignment is done here, but
			 * enable == 0 is only true when there are no
			 * connectors and a NULL mode.
			 *
			 * The other way around is true as well. enable != 0
			 * iff connectors are attached and a mode is set.
			 */
			new_crtc_state->mode_changed = true;
			new_crtc_state->connectors_changed = true;
		}

		if (old_crtc_state->active != new_crtc_state->active) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active changed\n",
					 crtc->base.id, crtc->name);
			new_crtc_state->active_changed = true;
		}

		if (new_crtc_state->enable != has_connectors) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled/connectors mismatch\n",
					 crtc->base.id, crtc->name);

			return -EINVAL;
		}
	}

	ret = handle_conflicting_encoders(state, false);
	if (ret)
		return ret;

	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

		/*
		 * This only sets crtc->connectors_changed for routing changes,
		 * drivers must set crtc->connectors_changed themselves when
		 * connector properties need to be updated.
		 */
		ret = update_connector_routing(state, connector,
					       old_connector_state,
					       new_connector_state);
		if (ret)
			return ret;
		if (old_connector_state->crtc) {
			new_crtc_state = drm_atomic_get_new_crtc_state(state,
								       old_connector_state->crtc);
			if (old_connector_state->link_status !=
			    new_connector_state->link_status)
				new_crtc_state->connectors_changed = true;
		}

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, new_connector_state);
		if (ret)
			return ret;

		connectors_mask += BIT(i);
	}

	/*
	 * After all the routing has been prepared we need to add in any
	 * connector which is itself unchanged, but whose crtc changes its
	 * configuration. This must be done before calling mode_fixup in case a
	 * crtc only changed its mode but has the same set of connectors.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs all connectors, enable: %c, active: %c\n",
				 crtc->base.id, crtc->name,
				 new_crtc_state->enable ? 'y' : 'n',
				 new_crtc_state->active ? 'y' : 'n');

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret != 0)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret != 0)
			return ret;
	}

	/*
	 * Iterate over all connectors again, to make sure atomic_check()
	 * has been called on them when a modeset is forced.
	 */
	for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
		const struct drm_connector_helper_funcs *funcs = connector->helper_private;

		if (connectors_mask & BIT(i))
			continue;

		if (funcs->atomic_check)
			ret = funcs->atomic_check(connector, new_connector_state);
		if (ret)
			return ret;
	}

	ret = mode_valid(state);
	if (ret)
		return ret;

	return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);

/**
 * drm_atomic_helper_check_planes - validate state object for planes changes
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * This does all the plane update related checks by calling into the
 * &drm_crtc_helper_funcs.atomic_check and &drm_plane_helper_funcs.atomic_check
 * hooks provided by the driver.
 *
 * It also sets &drm_crtc_state.planes_changed to indicate that a crtc has
 * updated planes.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
drm_atomic_helper_check_planes(struct drm_device *dev,
			       struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state, *old_plane_state;
	int i, ret = 0;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;

		WARN_ON(!drm_modeset_is_locked(&plane->mutex));

		funcs = plane->helper_private;

		drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(plane, new_plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_check)
			continue;

		ret = funcs->atomic_check(crtc, new_crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);

/**
 * drm_atomic_helper_check - validate state object
 * @dev: DRM device
 * @state: the driver state object
 *
 * Check the state object to see if the requested state is physically possible.
 * Only crtcs and planes have check callbacks, so for any additional (global)
 * checking that a driver needs it can simply wrap that around this function.
 * Drivers without such needs can directly use this as their
 * &drm_mode_config_funcs.atomic_check callback.
 *
 * This just wraps the two parts of the state checking for planes and modeset
 * state in the default order: First it calls drm_atomic_helper_check_modeset()
 * and then drm_atomic_helper_check_planes(). The assumption is that the
 * &drm_plane_helper_funcs.atomic_check and &drm_crtc_helper_funcs.atomic_check
 * functions depend upon an updated adjusted_mode.clock to e.g. properly compute
 * watermarks.
 *
 * RETURNS:
 * Zero for success or -errno
 */
int drm_atomic_helper_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	if (state->legacy_cursor_update)
		state->async_update = !drm_atomic_helper_async_check(dev, state);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);

static void
disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		/* Shut down everything that's in the changeset and currently
		 * still on. So need to check the old, saved state. */
		if (!old_conn_state->crtc)
			continue;

		old_crtc_state = drm_atomic_get_old_crtc_state(old_state, old_conn_state->crtc);

		if (!old_crtc_state->active ||
		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
			continue;

		encoder = old_conn_state->best_encoder;

		/* We shouldn't get this far if we didn't previously have
		 * an encoder.. but WARN_ON() rather than explode.
		 */
		if (WARN_ON(!encoder))
			continue;

		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call disable hooks twice.
		 */
		drm_bridge_disable(encoder->bridge);

		/* Right function depends upon target state. */
		if (funcs) {
			if (new_conn_state->crtc && funcs->prepare)
				funcs->prepare(encoder);
			else if (funcs->disable)
				funcs->disable(encoder);
			else if (funcs->dpms)
				funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
		}

		drm_bridge_post_disable(encoder->bridge);
	}

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		/* Shut down everything that needs a full modeset. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!old_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		DRM_DEBUG_ATOMIC("disabling [CRTC:%d:%s]\n",
				 crtc->base.id, crtc->name);


		/* Right function depends upon target state. */
		if (new_crtc_state->enable && funcs->prepare)
			funcs->prepare(crtc);
		else if (funcs->atomic_disable)
			funcs->atomic_disable(crtc, old_crtc_state);
		else if (funcs->disable)
			funcs->disable(crtc);
		else
			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
	}
}

/**
 * drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function updates all the various legacy modeset state pointers in
 * connectors, encoders and crtcs. It also updates the timestamping constants
 * used for precise vblank timestamps by calling
 * drm_calc_timestamping_constants().
 *
 * Drivers can use this for building their own atomic commit if they don't have
 * a pure helper-based modeset implementation.
 */
void
drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* clear out existing links and update dpms */
	for_each_oldnew_connector_in_state(old_state, connector, old_conn_state, new_conn_state, i) {
		if (connector->encoder) {
			WARN_ON(!connector->encoder->crtc);

			connector->encoder->crtc = NULL;
			connector->encoder = NULL;
		}

		crtc = new_conn_state->crtc;
		if ((!crtc && old_conn_state->crtc) ||
		    (crtc && drm_atomic_crtc_needs_modeset(crtc->state))) {
			int mode = DRM_MODE_DPMS_OFF;

			if (crtc && crtc->state->active)
				mode = DRM_MODE_DPMS_ON;

			connector->dpms = mode;
		}
	}

	/* set new links */
	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		if (!new_conn_state->crtc)
			continue;

		if (WARN_ON(!new_conn_state->best_encoder))
			continue;

		connector->encoder = new_conn_state->best_encoder;
		connector->encoder->crtc = new_conn_state->crtc;
	}

	/* set legacy state in the crtc structure */
	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		struct drm_plane *primary = crtc->primary;
		struct drm_plane_state *new_plane_state;

		crtc->mode = new_crtc_state->mode;
		crtc->enabled = new_crtc_state->enable;

		new_plane_state =
			drm_atomic_get_new_plane_state(old_state, primary);

		if (new_plane_state && new_plane_state->crtc == crtc) {
			crtc->x = new_plane_state->src_x >> 16;
			crtc->y = new_plane_state->src_y >> 16;
		}

		if (new_crtc_state->enable)
			drm_calc_timestamping_constants(crtc,
							&new_crtc_state->adjusted_mode);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);

static void
crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		if (!new_crtc_state->mode_changed)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable && funcs->mode_set_nofb) {
			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			funcs->mode_set_nofb(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;
		struct drm_display_mode *mode, *adjusted_mode;

		if (!new_conn_state->best_encoder)
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;
		new_crtc_state = new_conn_state->crtc->state;
		mode = &new_crtc_state->mode;
		adjusted_mode = &new_crtc_state->adjusted_mode;

		if (!new_crtc_state->mode_changed)
			continue;

		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call mode_set hooks twice.
		 */
		if (funcs && funcs->atomic_mode_set) {
			funcs->atomic_mode_set(encoder, new_crtc_state,
					       new_conn_state);
		} else if (funcs && funcs->mode_set) {
			funcs->mode_set(encoder, mode, adjusted_mode);
		}

		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
	}
}

/**
 * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function shuts down all the outputs that need to be shut down and
 * prepares them (if required) with the new mode.
 *
 * For compatibility with legacy crtc helpers this should be called before
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
					       struct drm_atomic_state *old_state)
{
	disable_outputs(dev, old_state);

	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);

	crtc_set_mode(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);

/**
 * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * This function enables all the outputs with the new configuration which had to
 * be turned off for the update.
 *
 * For compatibility with legacy crtc helpers this should be called after
 * drm_atomic_helper_commit_planes(), which is what the default commit function
 * does. But drivers with different needs can group the modeset commits together
 * and do the plane commits at the end. This is useful for drivers doing runtime
 * PM since plane updates then only happen when the CRTC is actually enabled.
 */
void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
					      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		/* Need to filter out CRTCs where only planes change. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			continue;

		if (!new_crtc_state->active)
			continue;

		funcs = crtc->helper_private;

		if (new_crtc_state->enable) {
			DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
					 crtc->base.id, crtc->name);

			if (funcs->atomic_enable)
				funcs->atomic_enable(crtc, old_crtc_state);
			else
				funcs->commit(crtc);
		}
	}

	for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
		const struct drm_encoder_helper_funcs *funcs;
		struct drm_encoder *encoder;

		if (!new_conn_state->best_encoder)
			continue;

		if (!new_conn_state->crtc->state->active ||
		    !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
			continue;

		encoder = new_conn_state->best_encoder;
		funcs = encoder->helper_private;

		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
				 encoder->base.id, encoder->name);

		/*
		 * Each encoder has at most one connector (since we always steal
		 * it away), so we won't call enable hooks twice.
		 */
		drm_bridge_pre_enable(encoder->bridge);

		if (funcs) {
			if (funcs->enable)
				funcs->enable(encoder);
			else if (funcs->commit)
				funcs->commit(encoder);
		}

		drm_bridge_enable(encoder->bridge);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);

/**
 * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
 * @dev: DRM device
 * @state: atomic state object with old state structures
 * @pre_swap: If true, do an interruptible wait, and @state is the new state.
 *	Otherwise @state is the old state.
 *
 * For implicit sync, drivers should fish the exclusive fence out from the
 * incoming fb's and stash it in the drm_plane_state. This is called after
 * drm_atomic_helper_swap_state() so it uses the current plane state (and
 * just uses the atomic state to find the changed planes).
 *
 * Note that @pre_swap is needed since the point where we block for fences moves
 * around depending upon whether an atomic commit is blocking or
 * non-blocking. For non-blocking commit all waiting needs to happen after
 * drm_atomic_helper_swap_state() is called, but for blocking commits we want
 * to wait **before** we do anything that can't be easily rolled back. That is
 * before we call drm_atomic_helper_swap_state().
 *
 * Returns zero on success or < 0 if dma_fence_wait() fails.
 */
int drm_atomic_helper_wait_for_fences(struct drm_device *dev,
				      struct drm_atomic_state *state,
				      bool pre_swap)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i, ret;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		if (!new_plane_state->fence)
			continue;

		WARN_ON(!new_plane_state->fb);

		/*
		 * If waiting for fences pre-swap (ie: nonblock), userspace can
		 * still interrupt the operation. Instead of blocking until the
		 * timer expires, make the wait interruptible.
		 */
		ret = dma_fence_wait(new_plane_state->fence, pre_swap);
		if (ret)
			return ret;

		dma_fence_put(new_plane_state->fence);
		new_plane_state->fence = NULL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);

/**
 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * Helper to, after atomic commit, wait for vblanks on all affected
 * crtcs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). It will only wait on CRTCs where the
 * framebuffers have actually changed to optimize for the legacy cursor and
 * plane update use-case.
 *
 * Drivers using the nonblocking commit tracking support initialized by calling
 * drm_atomic_helper_setup_commit() should look at
 * drm_atomic_helper_wait_for_flip_done() as an alternative.
 */
void
drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
				   struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i, ret;
	unsigned crtc_mask = 0;

	/*
	 * Legacy cursor ioctls are completely unsynced, and userspace
	 * relies on that (by doing tons of cursor updates).
	 */
	if (old_state->legacy_cursor_update)
		return;

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active || !new_crtc_state->planes_changed)
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (ret != 0)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc);
	}

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (!(crtc_mask & drm_crtc_mask(crtc)))
			continue;

		ret = wait_event_timeout(dev->vblank[i].queue,
					 old_state->crtcs[i].last_vblank_count !=
						drm_crtc_vblank_count(crtc),
					 msecs_to_jiffies(50));

		WARN(!ret, "[CRTC:%d:%s] vblank wait timed out\n",
		     crtc->base.id, crtc->name);

		drm_crtc_vblank_put(crtc);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);

/**
 * drm_atomic_helper_wait_for_flip_done - wait for all page flips to be done
 * @dev: DRM device
 * @old_state: atomic state object with old state structures
 *
 * Helper to, after atomic commit, wait for page flips on all affected
 * crtcs (ie. before cleaning up old framebuffers using
 * drm_atomic_helper_cleanup_planes()). Compared to
 * drm_atomic_helper_wait_for_vblanks() this waits for the completion of the
 * flip on all CRTCs, assuming that cursor-only updates are signalling their
 * completion immediately (or using a different path).
 *
 * This requires that drivers use the nonblocking commit tracking support
 * initialized using drm_atomic_helper_setup_commit().
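 *
 * As a rough sketch (the foo_ name is made up for illustration, not part of
 * this library), a custom &drm_mode_config_helper_funcs.atomic_commit_tail
 * implementation could use this instead of
 * drm_atomic_helper_wait_for_vblanks():
 *
 * .. code-block:: c
 *
 *	static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
 *	{
 *		struct drm_device *dev = old_state->dev;
 *
 *		drm_atomic_helper_commit_modeset_disables(dev, old_state);
 *		drm_atomic_helper_commit_planes(dev, old_state, 0);
 *		drm_atomic_helper_commit_modeset_enables(dev, old_state);
 *		drm_atomic_helper_commit_hw_done(old_state);
 *		drm_atomic_helper_wait_for_flip_done(dev, old_state);
 *		drm_atomic_helper_cleanup_planes(dev, old_state);
 *	}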
 */
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
					  struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *unused;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, unused, i) {
		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
		int ret;

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
				  crtc->base.id, crtc->name);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);

/**
 * drm_atomic_helper_commit_tail - commit atomic update to hardware
 * @old_state: atomic state object with old state structures
 *
 * This is the default implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that do not support runtime_pm or do not need the CRTC to be
 * enabled to perform a commit. Otherwise, see
 * drm_atomic_helper_commit_tail_rpm().
 *
 * Note that the default ordering of how the various stages are called is to
 * match the legacy modeset helper library most closely.
 */
void drm_atomic_helper_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail);

/**
 * drm_atomic_helper_commit_tail_rpm - commit atomic update to hardware
 * @old_state: new modeset state to be committed
 *
 * This is an alternative implementation for the
 * &drm_mode_config_helper_funcs.atomic_commit_tail hook, for drivers
 * that support runtime_pm or need the CRTC to be enabled to perform a
 * commit. Otherwise, one should use the default implementation
 * drm_atomic_helper_commit_tail().
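 *
 * A driver opts in by pointing &drm_mode_config_helper_funcs.atomic_commit_tail
 * at this function; the structure and variable names below are only an
 * illustrative sketch:
 *
 * .. code-block:: c
 *
 *	static const struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
 *		.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
 *	};
 *
 *	...
 *	dev->mode_config.helper_private = &foo_mode_config_helpers;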
 */
void drm_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}
EXPORT_SYMBOL(drm_atomic_helper_commit_tail_rpm);

static void commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	const struct drm_mode_config_helper_funcs *funcs;

	funcs = dev->mode_config.helper_private;

	drm_atomic_helper_wait_for_fences(dev, old_state, false);

	drm_atomic_helper_wait_for_dependencies(old_state);

	if (funcs && funcs->atomic_commit_tail)
		funcs->atomic_commit_tail(old_state);
	else
		drm_atomic_helper_commit_tail(old_state);

	drm_atomic_helper_commit_cleanup_done(old_state);

	drm_atomic_state_put(old_state);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	commit_tail(state);
}

/**
 * drm_atomic_helper_async_check - check if state can be committed asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This helper will check if it is possible to commit the state asynchronously.
 * Async commits are not supposed to swap the states like normal sync commits
 * but just do in-place changes on the current state.
 *
 * It will return 0 if the commit can happen in an asynchronous fashion or error
 * if not. Note that error just means it can't be committed asynchronously; if it
 * fails the commit should be treated like a normal synchronous commit.
 */
int drm_atomic_helper_async_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc_commit *commit;
	struct drm_plane *__plane, *plane = NULL;
	struct drm_plane_state *__plane_state, *plane_state = NULL;
	const struct drm_plane_helper_funcs *funcs;
	int i, j, n_planes = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return -EINVAL;
	}

	for_each_new_plane_in_state(state, __plane, __plane_state, i) {
		n_planes++;
		plane = __plane;
		plane_state = __plane_state;
	}

	/* FIXME: we support only single plane updates for now */
	if (!plane || n_planes != 1)
		return -EINVAL;

	if (!plane_state->crtc)
		return -EINVAL;

	funcs = plane->helper_private;
	if (!funcs->atomic_async_update)
		return -EINVAL;

	if (plane_state->fence)
		return -EINVAL;

	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane. This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (plane->crtc != crtc)
			continue;

		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit,
						  commit_entry);
		if (!commit) {
			spin_unlock(&crtc->commit_lock);
			continue;
		}
		spin_unlock(&crtc->commit_lock);

		if (!crtc->state->state)
			continue;

		for_each_plane_in_state(crtc->state->state, __plane,
					__plane_state, j) {
			if (__plane == plane)
				return -EINVAL;
		}
	}

	return funcs->atomic_async_check(plane, plane_state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);

/**
 * drm_atomic_helper_async_commit - commit state asynchronously
 * @dev: DRM device
 * @state: the driver state object
 *
 * This function commits a state asynchronously, i.e., not vblank
 * synchronized. It should be used on a state only when
 * drm_atomic_helper_async_check() succeeds. Async commits are not supposed to
 * swap the states like normal sync commits, but just do in-place changes on the
 * current state.
 */
void drm_atomic_helper_async_commit(struct drm_device *dev,
				    struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	const struct drm_plane_helper_funcs *funcs;
	int i;

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		funcs = plane->helper_private;
		funcs->atomic_async_update(plane, plane_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);

/**
 * drm_atomic_helper_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: whether nonblocking behavior is requested.
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. This function implements nonblocking commits, using
 * drm_atomic_helper_setup_commit() and related functions.
 *
 * Committing the actual hardware state is done through the
 * &drm_mode_config_helper_funcs.atomic_commit_tail callback, or its default
 * implementation drm_atomic_helper_commit_tail().
 *
 * RETURNS:
 * Zero for success or -errno.
 */
int drm_atomic_helper_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	if (state->async_update) {
		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret)
			return ret;

		drm_atomic_helper_async_commit(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret)
			goto err;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		goto err;

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 *
	 * NOTE: Commit work has multiple phases, first hardware commit, then
	 * cleanup. We want them to overlap, hence need system_unbound_wq to
	 * make sure work items don't artificially stall on each other.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;

err:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);

/**
 * DOC: implementing nonblocking commit
 *
 * Nonblocking atomic commits have to be implemented in the following sequence:
 *
 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
 * which commit needs to call which can fail, so we want to run it first and
 * synchronously.
 *
 * 2. Synchronize with any outstanding nonblocking commit worker threads which
 * might be affected by the new state update. This can be done by either
 * cancelling or flushing the work items, depending upon whether the driver can
 * deal with cancelled updates. Note that it is important to ensure that the
 * framebuffer cleanup is still done when cancelling.
 *
 * Asynchronous workers need to have sufficient parallelism to be able to run
 * different atomic commits on different CRTCs in parallel. The simplest way to
 * achieve this is by running them on the &system_unbound_wq work queue. Note
 * that drivers are not required to split up atomic commits and run an
 * individual commit in parallel - userspace is supposed to do that if it cares.
 * But it might be beneficial to do that for modesets, since those necessarily
 * must be done as one global operation, and enabling or disabling a CRTC can
 * take a long time. But even that is not required.
 *
 * 3. The software state is updated synchronously with
 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
 * locks means concurrent callers never see inconsistent state. And doing this
 * while it's guaranteed that no relevant nonblocking worker runs means that
 * nonblocking workers do not need to grab any locks. Actually they must not
 * grab locks, for otherwise the work flushing will deadlock.
 *
 * 4. Schedule a work item to do all subsequent steps, using the split-out
 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
 * then cleaning up the framebuffers after the old framebuffer is no longer
 * being displayed.
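 *
 * Condensed into code, with error handling and the step 2 synchronization left
 * out, the blocking vs. nonblocking split at the heart of this scheme looks
 * roughly like the sketch below, where commit_tail() stands for the worker
 * doing steps a) to c) and the final cleanup:
 *
 * .. code-block:: c
 *
 *	ret = drm_atomic_helper_prepare_planes(dev, state);
 *
 *	ret = drm_atomic_helper_setup_commit(state, nonblock);
 *	INIT_WORK(&state->commit_work, commit_work);
 *
 *	ret = drm_atomic_helper_swap_state(state, true);
 *
 *	drm_atomic_state_get(state);
 *	if (nonblock)
 *		queue_work(system_unbound_wq, &state->commit_work);
 *	else
 *		commit_tail(state);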
 *
 * The above scheme is implemented in the atomic helper libraries in
 * drm_atomic_helper_commit() using a bunch of helper functions. See
 * drm_atomic_helper_setup_commit() for a starting point.
 */

static int stall_checks(struct drm_crtc *crtc, bool nonblock)
{
	struct drm_crtc_commit *commit, *stall_commit = NULL;
	bool completed = true;
	int i;
	long ret = 0;

	spin_lock(&crtc->commit_lock);
	i = 0;
	list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
		if (i == 0) {
			completed = try_wait_for_completion(&commit->flip_done);
			/* Userspace is not allowed to get ahead of the previous
			 * commit with nonblocking ones. */
			if (!completed && nonblock) {
				spin_unlock(&crtc->commit_lock);
				return -EBUSY;
			}
		} else if (i == 1) {
			stall_commit = commit;
			drm_crtc_commit_get(stall_commit);
			break;
		}

		i++;
	}
	spin_unlock(&crtc->commit_lock);

	if (!stall_commit)
		return 0;

	/* We don't want to let commits get ahead of cleanup work too much,
	 * stalling on 2nd previous commit means triple-buffer won't ever stall.
	 */
	ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
							10*HZ);
	if (ret == 0)
		DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
			  crtc->base.id, crtc->name);

	drm_crtc_commit_put(stall_commit);

	return ret < 0 ? ret : 0;
}

static void release_crtc_commit(struct completion *completion)
{
	struct drm_crtc_commit *commit = container_of(completion,
						      typeof(*commit),
						      flip_done);

	drm_crtc_commit_put(commit);
}

/**
 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
 * @state: new modeset state to be committed
 * @nonblock: whether nonblocking behavior is requested.
 *
 * This function prepares @state to be used by the atomic helper's support for
 * nonblocking commits. Drivers using the nonblocking commit infrastructure
 * should always call this function from their
 * &drm_mode_config_funcs.atomic_commit hook.
 *
 * To be able to use this support drivers need to use a few more helper
 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
 * actually committing the hardware state, and for nonblocking commits this call
 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
 * and its stall parameter, for when a driver's commit hooks look at the
 * &drm_crtc.state, &drm_plane.state or &drm_connector.state pointer directly.
 *
 * Completion of the hardware commit step must be signalled using
 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
 * to read or change any permanent software or hardware modeset state. The only
 * exception is state protected by other means than &drm_modeset_lock locks.
 * Only the free standing @state with pointers to the old state structures can
 * be inspected, e.g. to clean up old buffers using
 * drm_atomic_helper_cleanup_planes().
 *
 * At the very end, before cleaning up @state drivers must call
 * drm_atomic_helper_commit_cleanup_done().
 *
 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
 * complete and easy-to-use default implementation of the atomic_commit() hook.
1701 * 1702 * The tracking of asynchronously executed and still pending commits is done 1703 * using the core structure &drm_crtc_commit. 1704 * 1705 * By default there's no need to clean up resources allocated by this function 1706 * explicitly: drm_atomic_state_default_clear() will take care of that 1707 * automatically. 1708 * 1709 * Returns: 1710 * 1711 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, 1712 * -ENOMEM on allocation failures and -EINTR when a signal is pending. 1713 */ 1714 int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, 1715 bool nonblock) 1716 { 1717 struct drm_crtc *crtc; 1718 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1719 struct drm_crtc_commit *commit; 1720 int i, ret; 1721 1722 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1723 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 1724 if (!commit) 1725 return -ENOMEM; 1726 1727 init_completion(&commit->flip_done); 1728 init_completion(&commit->hw_done); 1729 init_completion(&commit->cleanup_done); 1730 INIT_LIST_HEAD(&commit->commit_entry); 1731 kref_init(&commit->ref); 1732 commit->crtc = crtc; 1733 1734 state->crtcs[i].commit = commit; 1735 1736 ret = stall_checks(crtc, nonblock); 1737 if (ret) 1738 return ret; 1739 1740 /* Drivers only send out events when at least either current or 1741 * new CRTC state is active. Complete right away if everything 1742 * stays off. */ 1743 if (!old_crtc_state->active && !new_crtc_state->active) { 1744 complete_all(&commit->flip_done); 1745 continue; 1746 } 1747 1748 /* Legacy cursor updates are fully unsynced. */ 1749 if (state->legacy_cursor_update) { 1750 complete_all(&commit->flip_done); 1751 continue; 1752 } 1753 1754 if (!new_crtc_state->event) { 1755 commit->event = kzalloc(sizeof(*commit->event), 1756 GFP_KERNEL); 1757 if (!commit->event) 1758 return -ENOMEM; 1759 1760 new_crtc_state->event = commit->event; 1761 } 1762 1763 new_crtc_state->event->base.completion = &commit->flip_done; 1764 new_crtc_state->event->base.completion_release = release_crtc_commit; 1765 drm_crtc_commit_get(commit); 1766 } 1767 1768 return 0; 1769 } 1770 EXPORT_SYMBOL(drm_atomic_helper_setup_commit); 1771 1772 1773 static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc) 1774 { 1775 struct drm_crtc_commit *commit; 1776 int i = 0; 1777 1778 list_for_each_entry(commit, &crtc->commit_list, commit_entry) { 1779 /* skip the first entry, that's the current commit */ 1780 if (i == 1) 1781 return commit; 1782 i++; 1783 } 1784 1785 return NULL; 1786 } 1787 1788 /** 1789 * drm_atomic_helper_wait_for_dependencies - wait for required preceeding commits 1790 * @old_state: atomic state object with old state structures 1791 * 1792 * This function waits for all preceeding commits that touch the same CRTC as 1793 * @old_state to both be committed to the hardware (as signalled by 1794 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled 1795 * by calling drm_crtc_vblank_send_event() on the &drm_crtc_state.event). 1796 * 1797 * This is part of the atomic helper support for nonblocking commits, see 1798 * drm_atomic_helper_setup_commit() for an overview. 
 */
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc_commit *commit;
	int i;
	long ret;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		spin_lock(&crtc->commit_lock);
		commit = preceeding_commit(crtc);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		ret = wait_for_completion_timeout(&commit->hw_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
				  crtc->base.id, crtc->name);

		/* Currently no support for overwriting flips, hence
		 * stall for previous one to execute completely. */
		ret = wait_for_completion_timeout(&commit->flip_done,
						  10*HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
				  crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);

/**
 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
 * @old_state: atomic state object with old state structures
 *
 * This function is used to signal completion of the hardware commit step. After
 * this step the driver is not allowed to read or change any permanent software
 * or hardware modeset state. The only exception is state protected by other
 * means than &drm_modeset_lock locks.
 *
 * Drivers should try to postpone any expensive or delayed cleanup work until
 * after this function is called.
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc_commit *commit;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		commit = old_state->crtcs[i].commit;
		if (!commit)
			continue;

		/* backend must have consumed any event by now */
		WARN_ON(new_crtc_state->event);
		complete_all(&commit->hw_done);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);

/**
 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
 * @old_state: atomic state object with old state structures
 *
 * This signals completion of the atomic update @old_state, including any
 * cleanup work. If used, it must be called right before calling
 * drm_atomic_state_put().
 *
 * This is part of the atomic helper support for nonblocking commits, see
 * drm_atomic_helper_setup_commit() for an overview.
 */
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc_commit *commit;
	int i;
	long ret;

	for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
		commit = old_state->crtcs[i].commit;
		if (WARN_ON(!commit))
			continue;

		complete_all(&commit->cleanup_done);
		WARN_ON(!try_wait_for_completion(&commit->hw_done));

		/* commit_list borrows our reference, need to remove before we
		 * clean up our drm_atomic_state.
But only after it actually 1900 * completed, otherwise subsequent commits won't stall properly. */ 1901 if (try_wait_for_completion(&commit->flip_done)) 1902 goto del_commit; 1903 1904 /* We must wait for the vblank event to signal our completion 1905 * before releasing our reference, since the vblank work does 1906 * not hold a reference of its own. */ 1907 ret = wait_for_completion_timeout(&commit->flip_done, 1908 10*HZ); 1909 if (ret == 0) 1910 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", 1911 crtc->base.id, crtc->name); 1912 1913 del_commit: 1914 spin_lock(&crtc->commit_lock); 1915 list_del(&commit->commit_entry); 1916 spin_unlock(&crtc->commit_lock); 1917 } 1918 } 1919 EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); 1920 1921 /** 1922 * drm_atomic_helper_prepare_planes - prepare plane resources before commit 1923 * @dev: DRM device 1924 * @state: atomic state object with new state structures 1925 * 1926 * This function prepares plane state, specifically framebuffers, for the new 1927 * configuration, by calling &drm_plane_helper_funcs.prepare_fb. If any failure 1928 * is encountered this function will call &drm_plane_helper_funcs.cleanup_fb on 1929 * any already successfully prepared framebuffer. 1930 * 1931 * Returns: 1932 * 0 on success, negative error code on failure. 1933 */ 1934 int drm_atomic_helper_prepare_planes(struct drm_device *dev, 1935 struct drm_atomic_state *state) 1936 { 1937 struct drm_plane *plane; 1938 struct drm_plane_state *new_plane_state; 1939 int ret, i, j; 1940 1941 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 1942 const struct drm_plane_helper_funcs *funcs; 1943 1944 funcs = plane->helper_private; 1945 1946 if (funcs->prepare_fb) { 1947 ret = funcs->prepare_fb(plane, new_plane_state); 1948 if (ret) 1949 goto fail; 1950 } 1951 } 1952 1953 return 0; 1954 1955 fail: 1956 for_each_new_plane_in_state(state, plane, new_plane_state, j) { 1957 const struct drm_plane_helper_funcs *funcs; 1958 1959 if (j >= i) 1960 continue; 1961 1962 funcs = plane->helper_private; 1963 1964 if (funcs->cleanup_fb) 1965 funcs->cleanup_fb(plane, new_plane_state); 1966 } 1967 1968 return ret; 1969 } 1970 EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); 1971 1972 static bool plane_crtc_active(const struct drm_plane_state *state) 1973 { 1974 return state->crtc && state->crtc->state->active; 1975 } 1976 1977 /** 1978 * drm_atomic_helper_commit_planes - commit plane state 1979 * @dev: DRM device 1980 * @old_state: atomic state object with old state structures 1981 * @flags: flags for committing plane state 1982 * 1983 * This function commits the new plane state using the plane and atomic helper 1984 * functions for planes and crtcs. It assumes that the atomic state has already 1985 * been pushed into the relevant object state pointers, since this step can no 1986 * longer fail. 1987 * 1988 * It still requires the global state object @old_state to know which planes and 1989 * crtcs need to be updated though. 1990 * 1991 * Note that this function does all plane updates across all CRTCs in one step. 1992 * If the hardware can't support this approach look at 1993 * drm_atomic_helper_commit_planes_on_crtc() instead. 1994 * 1995 * Plane parameters can be updated by applications while the associated CRTC is 1996 * disabled. The DRM/KMS core will store the parameters in the plane state, 1997 * which will be available to the driver when the CRTC is turned on. As a result 1998 * most drivers don't need to be immediately notified of plane updates for a 1999 * disabled CRTC. 
2000 * 2001 * Unless otherwise needed, drivers are advised to set the ACTIVE_ONLY flag in 2002 * @flags in order not to receive plane update notifications related to a 2003 * disabled CRTC. This avoids the need to manually ignore plane updates in 2004 * driver code when the driver and/or hardware can't or just don't need to deal 2005 * with updates on disabled CRTCs, for example when supporting runtime PM. 2006 * 2007 * Drivers may set the NO_DISABLE_AFTER_MODESET flag in @flags if the relevant 2008 * display controllers require to disable a CRTC's planes when the CRTC is 2009 * disabled. This function would skip the &drm_plane_helper_funcs.atomic_disable 2010 * call for a plane if the CRTC of the old plane state needs a modesetting 2011 * operation. Of course, the drivers need to disable the planes in their CRTC 2012 * disable callbacks since no one else would do that. 2013 * 2014 * The drm_atomic_helper_commit() default implementation doesn't set the 2015 * ACTIVE_ONLY flag to most closely match the behaviour of the legacy helpers. 2016 * This should not be copied blindly by drivers. 2017 */ 2018 void drm_atomic_helper_commit_planes(struct drm_device *dev, 2019 struct drm_atomic_state *old_state, 2020 uint32_t flags) 2021 { 2022 struct drm_crtc *crtc; 2023 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2024 struct drm_plane *plane; 2025 struct drm_plane_state *old_plane_state, *new_plane_state; 2026 int i; 2027 bool active_only = flags & DRM_PLANE_COMMIT_ACTIVE_ONLY; 2028 bool no_disable = flags & DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET; 2029 2030 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { 2031 const struct drm_crtc_helper_funcs *funcs; 2032 2033 funcs = crtc->helper_private; 2034 2035 if (!funcs || !funcs->atomic_begin) 2036 continue; 2037 2038 if (active_only && !new_crtc_state->active) 2039 continue; 2040 2041 funcs->atomic_begin(crtc, old_crtc_state); 2042 } 2043 2044 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { 2045 const struct drm_plane_helper_funcs *funcs; 2046 bool disabling; 2047 2048 funcs = plane->helper_private; 2049 2050 if (!funcs) 2051 continue; 2052 2053 disabling = drm_atomic_plane_disabling(old_plane_state, 2054 new_plane_state); 2055 2056 if (active_only) { 2057 /* 2058 * Skip planes related to inactive CRTCs. If the plane 2059 * is enabled use the state of the current CRTC. If the 2060 * plane is being disabled use the state of the old 2061 * CRTC to avoid skipping planes being disabled on an 2062 * active CRTC. 2063 */ 2064 if (!disabling && !plane_crtc_active(new_plane_state)) 2065 continue; 2066 if (disabling && !plane_crtc_active(old_plane_state)) 2067 continue; 2068 } 2069 2070 /* 2071 * Special-case disabling the plane if drivers support it. 
		 */
		if (disabling && funcs->atomic_disable) {
			struct drm_crtc_state *crtc_state;

			crtc_state = old_plane_state->crtc->state;

			if (drm_atomic_crtc_needs_modeset(crtc_state) &&
			    no_disable)
				continue;

			funcs->atomic_disable(plane, old_plane_state);
		} else if (new_plane_state->crtc || disabling) {
			funcs->atomic_update(plane, old_plane_state);
		}
	}

	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_crtc_helper_funcs *funcs;

		funcs = crtc->helper_private;

		if (!funcs || !funcs->atomic_flush)
			continue;

		if (active_only && !new_crtc_state->active)
			continue;

		funcs->atomic_flush(crtc, old_crtc_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);

/**
 * drm_atomic_helper_commit_planes_on_crtc - commit plane state for a crtc
 * @old_crtc_state: atomic state object with the old crtc state
 *
 * This function commits the new plane state using the plane and atomic helper
 * functions for planes on the specific crtc. It assumes that the atomic state
 * has already been pushed into the relevant object state pointers, since this
 * step can no longer fail.
 *
 * This function is useful when plane updates should be done crtc-by-crtc
 * instead of one global step like drm_atomic_helper_commit_planes() does.
 *
 * This function can only be safely used when planes are not allowed to move
 * between different CRTCs, because it doesn't handle inter-CRTC dependencies.
 * Callers need to ensure that either no such dependencies exist, or resolve
 * them through ordering of commit calls or through some other means.
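 *
 * Under those constraints, a driver's commit code might, for example, walk the
 * CRTCs roughly as in the following sketch (old_state being the swapped
 * &drm_atomic_state; error and modeset handling omitted):
 *
 *	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 *		drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);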
2120 */ 2121 void 2122 drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state) 2123 { 2124 const struct drm_crtc_helper_funcs *crtc_funcs; 2125 struct drm_crtc *crtc = old_crtc_state->crtc; 2126 struct drm_atomic_state *old_state = old_crtc_state->state; 2127 struct drm_plane *plane; 2128 unsigned plane_mask; 2129 2130 plane_mask = old_crtc_state->plane_mask; 2131 plane_mask |= crtc->state->plane_mask; 2132 2133 crtc_funcs = crtc->helper_private; 2134 if (crtc_funcs && crtc_funcs->atomic_begin) 2135 crtc_funcs->atomic_begin(crtc, old_crtc_state); 2136 2137 drm_for_each_plane_mask(plane, crtc->dev, plane_mask) { 2138 struct drm_plane_state *old_plane_state = 2139 drm_atomic_get_old_plane_state(old_state, plane); 2140 const struct drm_plane_helper_funcs *plane_funcs; 2141 2142 plane_funcs = plane->helper_private; 2143 2144 if (!old_plane_state || !plane_funcs) 2145 continue; 2146 2147 WARN_ON(plane->state->crtc && plane->state->crtc != crtc); 2148 2149 if (drm_atomic_plane_disabling(old_plane_state, plane->state) && 2150 plane_funcs->atomic_disable) 2151 plane_funcs->atomic_disable(plane, old_plane_state); 2152 else if (plane->state->crtc || 2153 drm_atomic_plane_disabling(old_plane_state, plane->state)) 2154 plane_funcs->atomic_update(plane, old_plane_state); 2155 } 2156 2157 if (crtc_funcs && crtc_funcs->atomic_flush) 2158 crtc_funcs->atomic_flush(crtc, old_crtc_state); 2159 } 2160 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc); 2161 2162 /** 2163 * drm_atomic_helper_disable_planes_on_crtc - helper to disable CRTC's planes 2164 * @old_crtc_state: atomic state object with the old CRTC state 2165 * @atomic: if set, synchronize with CRTC's atomic_begin/flush hooks 2166 * 2167 * Disables all planes associated with the given CRTC. This can be 2168 * used for instance in the CRTC helper atomic_disable callback to disable 2169 * all planes. 2170 * 2171 * If the atomic-parameter is set the function calls the CRTC's 2172 * atomic_begin hook before and atomic_flush hook after disabling the 2173 * planes. 2174 * 2175 * It is a bug to call this function without having implemented the 2176 * &drm_plane_helper_funcs.atomic_disable plane hook. 2177 */ 2178 void 2179 drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc_state *old_crtc_state, 2180 bool atomic) 2181 { 2182 struct drm_crtc *crtc = old_crtc_state->crtc; 2183 const struct drm_crtc_helper_funcs *crtc_funcs = 2184 crtc->helper_private; 2185 struct drm_plane *plane; 2186 2187 if (atomic && crtc_funcs && crtc_funcs->atomic_begin) 2188 crtc_funcs->atomic_begin(crtc, NULL); 2189 2190 drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { 2191 const struct drm_plane_helper_funcs *plane_funcs = 2192 plane->helper_private; 2193 2194 if (!plane_funcs) 2195 continue; 2196 2197 WARN_ON(!plane_funcs->atomic_disable); 2198 if (plane_funcs->atomic_disable) 2199 plane_funcs->atomic_disable(plane, NULL); 2200 } 2201 2202 if (atomic && crtc_funcs && crtc_funcs->atomic_flush) 2203 crtc_funcs->atomic_flush(crtc, NULL); 2204 } 2205 EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc); 2206 2207 /** 2208 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit 2209 * @dev: DRM device 2210 * @old_state: atomic state object with old state structures 2211 * 2212 * This function cleans up plane state, specifically framebuffers, from the old 2213 * configuration. Hence the old configuration must be perserved in @old_state to 2214 * be able to call this function. 
 *
 * This function must also be called on the new state when the atomic update
 * fails at any point after calling drm_atomic_helper_prepare_planes().
 */
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
				      struct drm_atomic_state *old_state)
{
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	int i;

	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
		const struct drm_plane_helper_funcs *funcs;
		struct drm_plane_state *plane_state;

		/*
		 * This might be called before swapping when commit is aborted,
		 * in which case we have to cleanup the new state.
		 */
		if (old_plane_state == plane->state)
			plane_state = new_plane_state;
		else
			plane_state = old_plane_state;

		funcs = plane->helper_private;

		if (funcs->cleanup_fb)
			funcs->cleanup_fb(plane, plane_state);
	}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);

/**
 * drm_atomic_helper_swap_state - store atomic state into current sw state
 * @state: atomic state
 * @stall: stall for preceding commits
 *
 * This function stores the atomic state into the current state pointers in all
 * driver objects. It should be called after all steps which can fail have been
 * done and have succeeded, but before the actual hardware state is committed.
 *
 * For cleanup and error recovery the current state for all changed objects will
 * be swapped into @state.
 *
 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
 *
 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
 *
 * 2. Do any other steps that might fail.
 *
 * 3. Put the staged state into the current state pointers with this function.
 *
 * 4. Actually commit the hardware state.
 *
 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
 * contains the old state. Also do any other cleanup required with that state.
 *
 * @stall must be set when nonblocking commits for this driver directly access
 * the &drm_plane.state, &drm_crtc.state or &drm_connector.state pointer. With
 * the current atomic helpers this is almost always the case, since the helpers
 * don't pass the right state structures to the callbacks.
 *
 * Returns:
 *
 * Returns 0 on success. Can return -ERESTARTSYS when @stall is true and
 * waiting for the previous commits has been interrupted.
2281 */ 2282 int drm_atomic_helper_swap_state(struct drm_atomic_state *state, 2283 bool stall) 2284 { 2285 int i, ret; 2286 struct drm_connector *connector; 2287 struct drm_connector_state *old_conn_state, *new_conn_state; 2288 struct drm_crtc *crtc; 2289 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 2290 struct drm_plane *plane; 2291 struct drm_plane_state *old_plane_state, *new_plane_state; 2292 struct drm_crtc_commit *commit; 2293 struct drm_private_obj *obj; 2294 struct drm_private_state *old_obj_state, *new_obj_state; 2295 2296 if (stall) { 2297 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 2298 spin_lock(&crtc->commit_lock); 2299 commit = list_first_entry_or_null(&crtc->commit_list, 2300 struct drm_crtc_commit, commit_entry); 2301 if (commit) 2302 drm_crtc_commit_get(commit); 2303 spin_unlock(&crtc->commit_lock); 2304 2305 if (!commit) 2306 continue; 2307 2308 ret = wait_for_completion_interruptible(&commit->hw_done); 2309 drm_crtc_commit_put(commit); 2310 2311 if (ret) 2312 return ret; 2313 } 2314 } 2315 2316 for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) { 2317 WARN_ON(connector->state != old_conn_state); 2318 2319 old_conn_state->state = state; 2320 new_conn_state->state = NULL; 2321 2322 state->connectors[i].state = old_conn_state; 2323 connector->state = new_conn_state; 2324 } 2325 2326 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2327 WARN_ON(crtc->state != old_crtc_state); 2328 2329 old_crtc_state->state = state; 2330 new_crtc_state->state = NULL; 2331 2332 state->crtcs[i].state = old_crtc_state; 2333 crtc->state = new_crtc_state; 2334 2335 if (state->crtcs[i].commit) { 2336 spin_lock(&crtc->commit_lock); 2337 list_add(&state->crtcs[i].commit->commit_entry, 2338 &crtc->commit_list); 2339 spin_unlock(&crtc->commit_lock); 2340 2341 state->crtcs[i].commit->event = NULL; 2342 } 2343 } 2344 2345 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 2346 WARN_ON(plane->state != old_plane_state); 2347 2348 old_plane_state->state = state; 2349 new_plane_state->state = NULL; 2350 2351 state->planes[i].state = old_plane_state; 2352 plane->state = new_plane_state; 2353 } 2354 2355 for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) { 2356 WARN_ON(obj->state != old_obj_state); 2357 2358 old_obj_state->state = state; 2359 new_obj_state->state = NULL; 2360 2361 state->private_objs[i].state = old_obj_state; 2362 obj->state = new_obj_state; 2363 } 2364 2365 return 0; 2366 } 2367 EXPORT_SYMBOL(drm_atomic_helper_swap_state); 2368 2369 /** 2370 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic 2371 * @plane: plane object to update 2372 * @crtc: owning CRTC of owning plane 2373 * @fb: framebuffer to flip onto plane 2374 * @crtc_x: x offset of primary plane on crtc 2375 * @crtc_y: y offset of primary plane on crtc 2376 * @crtc_w: width of primary plane rectangle on crtc 2377 * @crtc_h: height of primary plane rectangle on crtc 2378 * @src_x: x offset of @fb for panning 2379 * @src_y: y offset of @fb for panning 2380 * @src_w: width of source rectangle in @fb 2381 * @src_h: height of source rectangle in @fb 2382 * @ctx: lock acquire context 2383 * 2384 * Provides a default plane update handler using the atomic driver interface. 
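 *
 * Together with its sibling helpers this is usually just plugged into
 * &struct drm_plane_funcs; a minimal sketch for a driver without subclassed
 * plane state (the foo_ prefix is hypothetical):
 *
 *	static const struct drm_plane_funcs foo_plane_funcs = {
 *		.update_plane		= drm_atomic_helper_update_plane,
 *		.disable_plane		= drm_atomic_helper_disable_plane,
 *		.destroy		= drm_plane_cleanup,
 *		.reset			= drm_atomic_helper_plane_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
 *	};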
2385 * 2386 * RETURNS: 2387 * Zero on success, error code on failure 2388 */ 2389 int drm_atomic_helper_update_plane(struct drm_plane *plane, 2390 struct drm_crtc *crtc, 2391 struct drm_framebuffer *fb, 2392 int crtc_x, int crtc_y, 2393 unsigned int crtc_w, unsigned int crtc_h, 2394 uint32_t src_x, uint32_t src_y, 2395 uint32_t src_w, uint32_t src_h, 2396 struct drm_modeset_acquire_ctx *ctx) 2397 { 2398 struct drm_atomic_state *state; 2399 struct drm_plane_state *plane_state; 2400 int ret = 0; 2401 2402 state = drm_atomic_state_alloc(plane->dev); 2403 if (!state) 2404 return -ENOMEM; 2405 2406 state->acquire_ctx = ctx; 2407 plane_state = drm_atomic_get_plane_state(state, plane); 2408 if (IS_ERR(plane_state)) { 2409 ret = PTR_ERR(plane_state); 2410 goto fail; 2411 } 2412 2413 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 2414 if (ret != 0) 2415 goto fail; 2416 drm_atomic_set_fb_for_plane(plane_state, fb); 2417 plane_state->crtc_x = crtc_x; 2418 plane_state->crtc_y = crtc_y; 2419 plane_state->crtc_w = crtc_w; 2420 plane_state->crtc_h = crtc_h; 2421 plane_state->src_x = src_x; 2422 plane_state->src_y = src_y; 2423 plane_state->src_w = src_w; 2424 plane_state->src_h = src_h; 2425 2426 if (plane == crtc->cursor) 2427 state->legacy_cursor_update = true; 2428 2429 ret = drm_atomic_commit(state); 2430 fail: 2431 drm_atomic_state_put(state); 2432 return ret; 2433 } 2434 EXPORT_SYMBOL(drm_atomic_helper_update_plane); 2435 2436 /** 2437 * drm_atomic_helper_disable_plane - Helper for primary plane disable using * atomic 2438 * @plane: plane to disable 2439 * @ctx: lock acquire context 2440 * 2441 * Provides a default plane disable handler using the atomic driver interface. 2442 * 2443 * RETURNS: 2444 * Zero on success, error code on failure 2445 */ 2446 int drm_atomic_helper_disable_plane(struct drm_plane *plane, 2447 struct drm_modeset_acquire_ctx *ctx) 2448 { 2449 struct drm_atomic_state *state; 2450 struct drm_plane_state *plane_state; 2451 int ret = 0; 2452 2453 state = drm_atomic_state_alloc(plane->dev); 2454 if (!state) 2455 return -ENOMEM; 2456 2457 state->acquire_ctx = ctx; 2458 plane_state = drm_atomic_get_plane_state(state, plane); 2459 if (IS_ERR(plane_state)) { 2460 ret = PTR_ERR(plane_state); 2461 goto fail; 2462 } 2463 2464 if (plane_state->crtc && (plane == plane->crtc->cursor)) 2465 plane_state->state->legacy_cursor_update = true; 2466 2467 ret = __drm_atomic_helper_disable_plane(plane, plane_state); 2468 if (ret != 0) 2469 goto fail; 2470 2471 ret = drm_atomic_commit(state); 2472 fail: 2473 drm_atomic_state_put(state); 2474 return ret; 2475 } 2476 EXPORT_SYMBOL(drm_atomic_helper_disable_plane); 2477 2478 /* just used from fb-helper and atomic-helper: */ 2479 int __drm_atomic_helper_disable_plane(struct drm_plane *plane, 2480 struct drm_plane_state *plane_state) 2481 { 2482 int ret; 2483 2484 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 2485 if (ret != 0) 2486 return ret; 2487 2488 drm_atomic_set_fb_for_plane(plane_state, NULL); 2489 plane_state->crtc_x = 0; 2490 plane_state->crtc_y = 0; 2491 plane_state->crtc_w = 0; 2492 plane_state->crtc_h = 0; 2493 plane_state->src_x = 0; 2494 plane_state->src_y = 0; 2495 plane_state->src_w = 0; 2496 plane_state->src_h = 0; 2497 2498 return 0; 2499 } 2500 2501 static int update_output_state(struct drm_atomic_state *state, 2502 struct drm_mode_set *set) 2503 { 2504 struct drm_device *dev = set->crtc->dev; 2505 struct drm_crtc *crtc; 2506 struct drm_crtc_state *new_crtc_state; 2507 struct drm_connector *connector; 2508 struct 
drm_connector_state *new_conn_state; 2509 int ret, i; 2510 2511 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 2512 state->acquire_ctx); 2513 if (ret) 2514 return ret; 2515 2516 /* First disable all connectors on the target crtc. */ 2517 ret = drm_atomic_add_affected_connectors(state, set->crtc); 2518 if (ret) 2519 return ret; 2520 2521 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 2522 if (new_conn_state->crtc == set->crtc) { 2523 ret = drm_atomic_set_crtc_for_connector(new_conn_state, 2524 NULL); 2525 if (ret) 2526 return ret; 2527 2528 /* Make sure legacy setCrtc always re-trains */ 2529 new_conn_state->link_status = DRM_LINK_STATUS_GOOD; 2530 } 2531 } 2532 2533 /* Then set all connectors from set->connectors on the target crtc */ 2534 for (i = 0; i < set->num_connectors; i++) { 2535 new_conn_state = drm_atomic_get_connector_state(state, 2536 set->connectors[i]); 2537 if (IS_ERR(new_conn_state)) 2538 return PTR_ERR(new_conn_state); 2539 2540 ret = drm_atomic_set_crtc_for_connector(new_conn_state, 2541 set->crtc); 2542 if (ret) 2543 return ret; 2544 } 2545 2546 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 2547 /* Don't update ->enable for the CRTC in the set_config request, 2548 * since a mismatch would indicate a bug in the upper layers. 2549 * The actual modeset code later on will catch any 2550 * inconsistencies here. */ 2551 if (crtc == set->crtc) 2552 continue; 2553 2554 if (!new_crtc_state->connector_mask) { 2555 ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state, 2556 NULL); 2557 if (ret < 0) 2558 return ret; 2559 2560 new_crtc_state->active = false; 2561 } 2562 } 2563 2564 return 0; 2565 } 2566 2567 /** 2568 * drm_atomic_helper_set_config - set a new config from userspace 2569 * @set: mode set configuration 2570 * @ctx: lock acquisition context 2571 * 2572 * Provides a default crtc set_config handler using the atomic driver interface. 2573 * 2574 * NOTE: For backwards compatibility with old userspace this automatically 2575 * resets the "link-status" property to GOOD, to force any link 2576 * re-training. The SETCRTC ioctl does not define whether an update does 2577 * need a full modeset or just a plane update, hence we're allowed to do 2578 * that. See also drm_mode_connector_set_link_status_property(). 2579 * 2580 * Returns: 2581 * Returns 0 on success, negative errno numbers on failure. 
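 *
 * For reference, a fully helper-based &struct drm_crtc_funcs then typically
 * looks like this sketch (foo_ prefix hypothetical, driver-specific hooks
 * omitted):
 *
 *	static const struct drm_crtc_funcs foo_crtc_funcs = {
 *		.set_config		= drm_atomic_helper_set_config,
 *		.page_flip		= drm_atomic_helper_page_flip,
 *		.destroy		= drm_crtc_cleanup,
 *		.reset			= drm_atomic_helper_crtc_reset,
 *		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
 *		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
 *	};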
 */
int drm_atomic_helper_set_config(struct drm_mode_set *set,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_crtc *crtc = set->crtc;
	int ret = 0;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = ctx;
	ret = __drm_atomic_helper_set_config(set, state);
	if (ret != 0)
		goto fail;

	ret = handle_conflicting_encoders(state, true);
	if (ret)
		goto fail;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_set_config);

/* just used from fb-helper and atomic-helper: */
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
				   struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *primary_state;
	struct drm_crtc *crtc = set->crtc;
	int hdisplay, vdisplay;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	primary_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(primary_state))
		return PTR_ERR(primary_state);

	if (!set->mode) {
		WARN_ON(set->fb);
		WARN_ON(set->num_connectors);

		ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
		if (ret != 0)
			return ret;

		crtc_state->active = false;

		ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
		if (ret != 0)
			return ret;

		drm_atomic_set_fb_for_plane(primary_state, NULL);

		goto commit;
	}

	WARN_ON(!set->fb);
	WARN_ON(!set->num_connectors);

	ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
	if (ret != 0)
		return ret;

	crtc_state->active = true;

	ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
	if (ret != 0)
		return ret;

	drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);

	drm_atomic_set_fb_for_plane(primary_state, set->fb);
	primary_state->crtc_x = 0;
	primary_state->crtc_y = 0;
	primary_state->crtc_w = hdisplay;
	primary_state->crtc_h = vdisplay;
	primary_state->src_x = set->x << 16;
	primary_state->src_y = set->y << 16;
	if (drm_rotation_90_or_270(primary_state->rotation)) {
		primary_state->src_w = vdisplay << 16;
		primary_state->src_h = hdisplay << 16;
	} else {
		primary_state->src_w = hdisplay << 16;
		primary_state->src_h = vdisplay << 16;
	}

commit:
	ret = update_output_state(state, set);
	if (ret)
		return ret;

	return 0;
}

/**
 * drm_atomic_helper_disable_all - disable all currently active outputs
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Loops through all connectors, finding those that aren't turned off and then
 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
 * that they are connected to.
 *
 * This is used for example in suspend/resume to disable all currently active
 * functions when suspending. If you just want to shut down everything at e.g.
 * driver unload, look at drm_atomic_helper_shutdown().
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
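 *
 * If the caller does not already hold all modeset locks, the usual way to deal
 * with -EDEADLK is the acquire-context retry dance, sketched below (this is
 * also what drm_atomic_helper_shutdown() does internally):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	while (1) {
 *		ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *		if (!ret)
 *			ret = drm_atomic_helper_disable_all(dev, &ctx);
 *
 *		if (ret != -EDEADLK)
 *			break;
 *
 *		drm_modeset_backoff(&ctx);
 *	}
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);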
2701 * 2702 * Returns: 2703 * 0 on success or a negative error code on failure. 2704 * 2705 * See also: 2706 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and 2707 * drm_atomic_helper_shutdown(). 2708 */ 2709 int drm_atomic_helper_disable_all(struct drm_device *dev, 2710 struct drm_modeset_acquire_ctx *ctx) 2711 { 2712 struct drm_atomic_state *state; 2713 struct drm_connector_state *conn_state; 2714 struct drm_connector *conn; 2715 struct drm_plane_state *plane_state; 2716 struct drm_plane *plane; 2717 struct drm_crtc_state *crtc_state; 2718 struct drm_crtc *crtc; 2719 unsigned plane_mask = 0; 2720 int ret, i; 2721 2722 state = drm_atomic_state_alloc(dev); 2723 if (!state) 2724 return -ENOMEM; 2725 2726 state->acquire_ctx = ctx; 2727 2728 drm_for_each_crtc(crtc, dev) { 2729 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2730 if (IS_ERR(crtc_state)) { 2731 ret = PTR_ERR(crtc_state); 2732 goto free; 2733 } 2734 2735 crtc_state->active = false; 2736 2737 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); 2738 if (ret < 0) 2739 goto free; 2740 2741 ret = drm_atomic_add_affected_planes(state, crtc); 2742 if (ret < 0) 2743 goto free; 2744 2745 ret = drm_atomic_add_affected_connectors(state, crtc); 2746 if (ret < 0) 2747 goto free; 2748 } 2749 2750 for_each_new_connector_in_state(state, conn, conn_state, i) { 2751 ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); 2752 if (ret < 0) 2753 goto free; 2754 } 2755 2756 for_each_new_plane_in_state(state, plane, plane_state, i) { 2757 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 2758 if (ret < 0) 2759 goto free; 2760 2761 drm_atomic_set_fb_for_plane(plane_state, NULL); 2762 plane_mask |= BIT(drm_plane_index(plane)); 2763 plane->old_fb = plane->fb; 2764 } 2765 2766 ret = drm_atomic_commit(state); 2767 free: 2768 if (plane_mask) 2769 drm_atomic_clean_old_fb(dev, plane_mask, ret); 2770 drm_atomic_state_put(state); 2771 return ret; 2772 } 2773 2774 EXPORT_SYMBOL(drm_atomic_helper_disable_all); 2775 2776 /** 2777 * drm_atomic_helper_shutdown - shutdown all CRTC 2778 * @dev: DRM device 2779 * 2780 * This shuts down all CRTC, which is useful for driver unloading. Shutdown on 2781 * suspend should instead be handled with drm_atomic_helper_suspend(), since 2782 * that also takes a snapshot of the modeset state to be restored on resume. 2783 * 2784 * This is just a convenience wrapper around drm_atomic_helper_disable_all(), 2785 * and it is the atomic version of drm_crtc_force_disable_all(). 2786 */ 2787 void drm_atomic_helper_shutdown(struct drm_device *dev) 2788 { 2789 struct drm_modeset_acquire_ctx ctx; 2790 int ret; 2791 2792 drm_modeset_acquire_init(&ctx, 0); 2793 while (1) { 2794 ret = drm_modeset_lock_all_ctx(dev, &ctx); 2795 if (!ret) 2796 ret = drm_atomic_helper_disable_all(dev, &ctx); 2797 2798 if (ret != -EDEADLK) 2799 break; 2800 2801 drm_modeset_backoff(&ctx); 2802 } 2803 2804 if (ret) 2805 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); 2806 2807 drm_modeset_drop_locks(&ctx); 2808 drm_modeset_acquire_fini(&ctx); 2809 } 2810 EXPORT_SYMBOL(drm_atomic_helper_shutdown); 2811 2812 /** 2813 * drm_atomic_helper_suspend - subsystem-level suspend helper 2814 * @dev: DRM device 2815 * 2816 * Duplicates the current atomic state, disables all active outputs and then 2817 * returns a pointer to the original atomic state to the caller. 
Drivers can 2818 * pass this pointer to the drm_atomic_helper_resume() helper upon resume to 2819 * restore the output configuration that was active at the time the system 2820 * entered suspend. 2821 * 2822 * Note that it is potentially unsafe to use this. The atomic state object 2823 * returned by this function is assumed to be persistent. Drivers must ensure 2824 * that this holds true. Before calling this function, drivers must make sure 2825 * to suspend fbdev emulation so that nothing can be using the device. 2826 * 2827 * Returns: 2828 * A pointer to a copy of the state before suspend on success or an ERR_PTR()- 2829 * encoded error code on failure. Drivers should store the returned atomic 2830 * state object and pass it to the drm_atomic_helper_resume() helper upon 2831 * resume. 2832 * 2833 * See also: 2834 * drm_atomic_helper_duplicate_state(), drm_atomic_helper_disable_all(), 2835 * drm_atomic_helper_resume(), drm_atomic_helper_commit_duplicated_state() 2836 */ 2837 struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) 2838 { 2839 struct drm_modeset_acquire_ctx ctx; 2840 struct drm_atomic_state *state; 2841 int err; 2842 2843 drm_modeset_acquire_init(&ctx, 0); 2844 2845 retry: 2846 err = drm_modeset_lock_all_ctx(dev, &ctx); 2847 if (err < 0) { 2848 state = ERR_PTR(err); 2849 goto unlock; 2850 } 2851 2852 state = drm_atomic_helper_duplicate_state(dev, &ctx); 2853 if (IS_ERR(state)) 2854 goto unlock; 2855 2856 err = drm_atomic_helper_disable_all(dev, &ctx); 2857 if (err < 0) { 2858 drm_atomic_state_put(state); 2859 state = ERR_PTR(err); 2860 goto unlock; 2861 } 2862 2863 unlock: 2864 if (PTR_ERR(state) == -EDEADLK) { 2865 drm_modeset_backoff(&ctx); 2866 goto retry; 2867 } 2868 2869 drm_modeset_drop_locks(&ctx); 2870 drm_modeset_acquire_fini(&ctx); 2871 return state; 2872 } 2873 EXPORT_SYMBOL(drm_atomic_helper_suspend); 2874 2875 /** 2876 * drm_atomic_helper_commit_duplicated_state - commit duplicated state 2877 * @state: duplicated atomic state to commit 2878 * @ctx: pointer to acquire_ctx to use for commit. 2879 * 2880 * The state returned by drm_atomic_helper_duplicate_state() and 2881 * drm_atomic_helper_suspend() is partially invalid, and needs to 2882 * be fixed up before commit. 2883 * 2884 * Returns: 2885 * 0 on success or a negative error code on failure. 
2886 * 2887 * See also: 2888 * drm_atomic_helper_suspend() 2889 */ 2890 int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, 2891 struct drm_modeset_acquire_ctx *ctx) 2892 { 2893 int i; 2894 struct drm_plane *plane; 2895 struct drm_plane_state *new_plane_state; 2896 struct drm_connector *connector; 2897 struct drm_connector_state *new_conn_state; 2898 struct drm_crtc *crtc; 2899 struct drm_crtc_state *new_crtc_state; 2900 unsigned plane_mask = 0; 2901 struct drm_device *dev = state->dev; 2902 int ret; 2903 2904 state->acquire_ctx = ctx; 2905 2906 for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2907 plane_mask |= BIT(drm_plane_index(plane)); 2908 state->planes[i].old_state = plane->state; 2909 } 2910 2911 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) 2912 state->crtcs[i].old_state = crtc->state; 2913 2914 for_each_new_connector_in_state(state, connector, new_conn_state, i) 2915 state->connectors[i].old_state = connector->state; 2916 2917 ret = drm_atomic_commit(state); 2918 if (plane_mask) 2919 drm_atomic_clean_old_fb(dev, plane_mask, ret); 2920 2921 return ret; 2922 } 2923 EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); 2924 2925 /** 2926 * drm_atomic_helper_resume - subsystem-level resume helper 2927 * @dev: DRM device 2928 * @state: atomic state to resume to 2929 * 2930 * Calls drm_mode_config_reset() to synchronize hardware and software states, 2931 * grabs all modeset locks and commits the atomic state object. This can be 2932 * used in conjunction with the drm_atomic_helper_suspend() helper to 2933 * implement suspend/resume for drivers that support atomic mode-setting. 2934 * 2935 * Returns: 2936 * 0 on success or a negative error code on failure. 2937 * 2938 * See also: 2939 * drm_atomic_helper_suspend() 2940 */ 2941 int drm_atomic_helper_resume(struct drm_device *dev, 2942 struct drm_atomic_state *state) 2943 { 2944 struct drm_modeset_acquire_ctx ctx; 2945 int err; 2946 2947 drm_mode_config_reset(dev); 2948 2949 drm_modeset_acquire_init(&ctx, 0); 2950 while (1) { 2951 err = drm_modeset_lock_all_ctx(dev, &ctx); 2952 if (err) 2953 goto out; 2954 2955 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 2956 out: 2957 if (err != -EDEADLK) 2958 break; 2959 2960 drm_modeset_backoff(&ctx); 2961 } 2962 2963 drm_atomic_state_put(state); 2964 drm_modeset_drop_locks(&ctx); 2965 drm_modeset_acquire_fini(&ctx); 2966 2967 return err; 2968 } 2969 EXPORT_SYMBOL(drm_atomic_helper_resume); 2970 2971 static int page_flip_common(struct drm_atomic_state *state, 2972 struct drm_crtc *crtc, 2973 struct drm_framebuffer *fb, 2974 struct drm_pending_vblank_event *event, 2975 uint32_t flags) 2976 { 2977 struct drm_plane *plane = crtc->primary; 2978 struct drm_plane_state *plane_state; 2979 struct drm_crtc_state *crtc_state; 2980 int ret = 0; 2981 2982 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2983 if (IS_ERR(crtc_state)) 2984 return PTR_ERR(crtc_state); 2985 2986 crtc_state->event = event; 2987 crtc_state->pageflip_flags = flags; 2988 2989 plane_state = drm_atomic_get_plane_state(state, plane); 2990 if (IS_ERR(plane_state)) 2991 return PTR_ERR(plane_state); 2992 2993 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 2994 if (ret != 0) 2995 return ret; 2996 drm_atomic_set_fb_for_plane(plane_state, fb); 2997 2998 /* Make sure we don't accidentally do a full modeset. 
*/ 2999 state->allow_modeset = false; 3000 if (!crtc_state->active) { 3001 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled, rejecting legacy flip\n", 3002 crtc->base.id, crtc->name); 3003 return -EINVAL; 3004 } 3005 3006 return ret; 3007 } 3008 3009 /** 3010 * drm_atomic_helper_page_flip - execute a legacy page flip 3011 * @crtc: DRM crtc 3012 * @fb: DRM framebuffer 3013 * @event: optional DRM event to signal upon completion 3014 * @flags: flip flags for non-vblank sync'ed updates 3015 * @ctx: lock acquisition context 3016 * 3017 * Provides a default &drm_crtc_funcs.page_flip implementation 3018 * using the atomic driver interface. 3019 * 3020 * Returns: 3021 * Returns 0 on success, negative errno numbers on failure. 3022 * 3023 * See also: 3024 * drm_atomic_helper_page_flip_target() 3025 */ 3026 int drm_atomic_helper_page_flip(struct drm_crtc *crtc, 3027 struct drm_framebuffer *fb, 3028 struct drm_pending_vblank_event *event, 3029 uint32_t flags, 3030 struct drm_modeset_acquire_ctx *ctx) 3031 { 3032 struct drm_plane *plane = crtc->primary; 3033 struct drm_atomic_state *state; 3034 int ret = 0; 3035 3036 state = drm_atomic_state_alloc(plane->dev); 3037 if (!state) 3038 return -ENOMEM; 3039 3040 state->acquire_ctx = ctx; 3041 3042 ret = page_flip_common(state, crtc, fb, event, flags); 3043 if (ret != 0) 3044 goto fail; 3045 3046 ret = drm_atomic_nonblocking_commit(state); 3047 fail: 3048 drm_atomic_state_put(state); 3049 return ret; 3050 } 3051 EXPORT_SYMBOL(drm_atomic_helper_page_flip); 3052 3053 /** 3054 * drm_atomic_helper_page_flip_target - do page flip on target vblank period. 3055 * @crtc: DRM crtc 3056 * @fb: DRM framebuffer 3057 * @event: optional DRM event to signal upon completion 3058 * @flags: flip flags for non-vblank sync'ed updates 3059 * @target: specifying the target vblank period when the flip to take effect 3060 * @ctx: lock acquisition context 3061 * 3062 * Provides a default &drm_crtc_funcs.page_flip_target implementation. 3063 * Similar to drm_atomic_helper_page_flip() with extra parameter to specify 3064 * target vblank period to flip. 3065 * 3066 * Returns: 3067 * Returns 0 on success, negative errno numbers on failure. 3068 */ 3069 int drm_atomic_helper_page_flip_target(struct drm_crtc *crtc, 3070 struct drm_framebuffer *fb, 3071 struct drm_pending_vblank_event *event, 3072 uint32_t flags, 3073 uint32_t target, 3074 struct drm_modeset_acquire_ctx *ctx) 3075 { 3076 struct drm_plane *plane = crtc->primary; 3077 struct drm_atomic_state *state; 3078 struct drm_crtc_state *crtc_state; 3079 int ret = 0; 3080 3081 state = drm_atomic_state_alloc(plane->dev); 3082 if (!state) 3083 return -ENOMEM; 3084 3085 state->acquire_ctx = ctx; 3086 3087 ret = page_flip_common(state, crtc, fb, event, flags); 3088 if (ret != 0) 3089 goto fail; 3090 3091 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 3092 if (WARN_ON(!crtc_state)) { 3093 ret = -EINVAL; 3094 goto fail; 3095 } 3096 crtc_state->target_vblank = target; 3097 3098 ret = drm_atomic_nonblocking_commit(state); 3099 fail: 3100 drm_atomic_state_put(state); 3101 return ret; 3102 } 3103 EXPORT_SYMBOL(drm_atomic_helper_page_flip_target); 3104 3105 /** 3106 * drm_atomic_helper_best_encoder - Helper for 3107 * &drm_connector_helper_funcs.best_encoder callback 3108 * @connector: Connector control structure 3109 * 3110 * This is a &drm_connector_helper_funcs.best_encoder callback helper for 3111 * connectors that support exactly 1 encoder, statically determined at driver 3112 * init time. 
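 *
 * A connector using this helper simply points its helper vtable at it, e.g.
 * (foo_connector_get_modes() being a hypothetical driver callback):
 *
 *	static const struct drm_connector_helper_funcs foo_connector_helper_funcs = {
 *		.get_modes	= foo_connector_get_modes,
 *		.best_encoder	= drm_atomic_helper_best_encoder,
 *	};
 *
 * Atomic drivers may also leave &drm_connector_helper_funcs.best_encoder set
 * to NULL, in which case the atomic helpers fall back to this function
 * automatically.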
3113 */ 3114 struct drm_encoder * 3115 drm_atomic_helper_best_encoder(struct drm_connector *connector) 3116 { 3117 WARN_ON(connector->encoder_ids[1]); 3118 return drm_encoder_find(connector->dev, connector->encoder_ids[0]); 3119 } 3120 EXPORT_SYMBOL(drm_atomic_helper_best_encoder); 3121 3122 /** 3123 * DOC: atomic state reset and initialization 3124 * 3125 * Both the drm core and the atomic helpers assume that there is always the full 3126 * and correct atomic software state for all connectors, CRTCs and planes 3127 * available. Which is a bit a problem on driver load and also after system 3128 * suspend. One way to solve this is to have a hardware state read-out 3129 * infrastructure which reconstructs the full software state (e.g. the i915 3130 * driver). 3131 * 3132 * The simpler solution is to just reset the software state to everything off, 3133 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this 3134 * the atomic helpers provide default reset implementations for all hooks. 3135 * 3136 * On the upside the precise state tracking of atomic simplifies system suspend 3137 * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe 3138 * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume(). 3139 * For other drivers the building blocks are split out, see the documentation 3140 * for these functions. 3141 */ 3142 3143 /** 3144 * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs 3145 * @crtc: drm CRTC 3146 * 3147 * Resets the atomic state for @crtc by freeing the state pointer (which might 3148 * be NULL, e.g. at driver load time) and allocating a new empty state object. 3149 */ 3150 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc) 3151 { 3152 if (crtc->state) 3153 __drm_atomic_helper_crtc_destroy_state(crtc->state); 3154 3155 kfree(crtc->state); 3156 crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL); 3157 3158 if (crtc->state) 3159 crtc->state->crtc = crtc; 3160 } 3161 EXPORT_SYMBOL(drm_atomic_helper_crtc_reset); 3162 3163 /** 3164 * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state 3165 * @crtc: CRTC object 3166 * @state: atomic CRTC state 3167 * 3168 * Copies atomic state from a CRTC's current state and resets inferred values. 3169 * This is useful for drivers that subclass the CRTC state. 3170 */ 3171 void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, 3172 struct drm_crtc_state *state) 3173 { 3174 memcpy(state, crtc->state, sizeof(*state)); 3175 3176 if (state->mode_blob) 3177 drm_property_blob_get(state->mode_blob); 3178 if (state->degamma_lut) 3179 drm_property_blob_get(state->degamma_lut); 3180 if (state->ctm) 3181 drm_property_blob_get(state->ctm); 3182 if (state->gamma_lut) 3183 drm_property_blob_get(state->gamma_lut); 3184 state->mode_changed = false; 3185 state->active_changed = false; 3186 state->planes_changed = false; 3187 state->connectors_changed = false; 3188 state->color_mgmt_changed = false; 3189 state->zpos_changed = false; 3190 state->event = NULL; 3191 state->pageflip_flags = 0; 3192 } 3193 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state); 3194 3195 /** 3196 * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook 3197 * @crtc: drm CRTC 3198 * 3199 * Default CRTC state duplicate hook for drivers which don't have their own 3200 * subclassed CRTC state structure. 
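 *
 * Drivers which do subclass the CRTC state would instead build their own hook
 * on top of __drm_atomic_helper_crtc_duplicate_state(), roughly like this
 * sketch (struct foo_crtc_state and its members are hypothetical):
 *
 *	struct foo_crtc_state {
 *		struct drm_crtc_state base;
 *		u32 foo_dither_mode;
 *	};
 *
 *	static struct drm_crtc_state *
 *	foo_crtc_duplicate_state(struct drm_crtc *crtc)
 *	{
 *		struct foo_crtc_state *old =
 *			container_of(crtc->state, struct foo_crtc_state, base);
 *		struct foo_crtc_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return NULL;
 *
 *		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 *		state->foo_dither_mode = old->foo_dither_mode;
 *
 *		return &state->base;
 *	}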
3201 */ 3202 struct drm_crtc_state * 3203 drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc) 3204 { 3205 struct drm_crtc_state *state; 3206 3207 if (WARN_ON(!crtc->state)) 3208 return NULL; 3209 3210 state = kmalloc(sizeof(*state), GFP_KERNEL); 3211 if (state) 3212 __drm_atomic_helper_crtc_duplicate_state(crtc, state); 3213 3214 return state; 3215 } 3216 EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state); 3217 3218 /** 3219 * __drm_atomic_helper_crtc_destroy_state - release CRTC state 3220 * @state: CRTC state object to release 3221 * 3222 * Releases all resources stored in the CRTC state without actually freeing 3223 * the memory of the CRTC state. This is useful for drivers that subclass the 3224 * CRTC state. 3225 */ 3226 void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state) 3227 { 3228 drm_property_blob_put(state->mode_blob); 3229 drm_property_blob_put(state->degamma_lut); 3230 drm_property_blob_put(state->ctm); 3231 drm_property_blob_put(state->gamma_lut); 3232 } 3233 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); 3234 3235 /** 3236 * drm_atomic_helper_crtc_destroy_state - default state destroy hook 3237 * @crtc: drm CRTC 3238 * @state: CRTC state object to release 3239 * 3240 * Default CRTC state destroy hook for drivers which don't have their own 3241 * subclassed CRTC state structure. 3242 */ 3243 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, 3244 struct drm_crtc_state *state) 3245 { 3246 __drm_atomic_helper_crtc_destroy_state(state); 3247 kfree(state); 3248 } 3249 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state); 3250 3251 /** 3252 * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes 3253 * @plane: drm plane 3254 * 3255 * Resets the atomic state for @plane by freeing the state pointer (which might 3256 * be NULL, e.g. at driver load time) and allocating a new empty state object. 3257 */ 3258 void drm_atomic_helper_plane_reset(struct drm_plane *plane) 3259 { 3260 if (plane->state) 3261 __drm_atomic_helper_plane_destroy_state(plane->state); 3262 3263 kfree(plane->state); 3264 plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL); 3265 3266 if (plane->state) { 3267 plane->state->plane = plane; 3268 plane->state->rotation = DRM_MODE_ROTATE_0; 3269 } 3270 } 3271 EXPORT_SYMBOL(drm_atomic_helper_plane_reset); 3272 3273 /** 3274 * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state 3275 * @plane: plane object 3276 * @state: atomic plane state 3277 * 3278 * Copies atomic state from a plane's current state. This is useful for 3279 * drivers that subclass the plane state. 3280 */ 3281 void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, 3282 struct drm_plane_state *state) 3283 { 3284 memcpy(state, plane->state, sizeof(*state)); 3285 3286 if (state->fb) 3287 drm_framebuffer_get(state->fb); 3288 3289 state->fence = NULL; 3290 } 3291 EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state); 3292 3293 /** 3294 * drm_atomic_helper_plane_duplicate_state - default state duplicate hook 3295 * @plane: drm plane 3296 * 3297 * Default plane state duplicate hook for drivers which don't have their own 3298 * subclassed plane state structure. 
3299 */ 3300 struct drm_plane_state * 3301 drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane) 3302 { 3303 struct drm_plane_state *state; 3304 3305 if (WARN_ON(!plane->state)) 3306 return NULL; 3307 3308 state = kmalloc(sizeof(*state), GFP_KERNEL); 3309 if (state) 3310 __drm_atomic_helper_plane_duplicate_state(plane, state); 3311 3312 return state; 3313 } 3314 EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state); 3315 3316 /** 3317 * __drm_atomic_helper_plane_destroy_state - release plane state 3318 * @state: plane state object to release 3319 * 3320 * Releases all resources stored in the plane state without actually freeing 3321 * the memory of the plane state. This is useful for drivers that subclass the 3322 * plane state. 3323 */ 3324 void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state) 3325 { 3326 if (state->fb) 3327 drm_framebuffer_put(state->fb); 3328 3329 if (state->fence) 3330 dma_fence_put(state->fence); 3331 } 3332 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); 3333 3334 /** 3335 * drm_atomic_helper_plane_destroy_state - default state destroy hook 3336 * @plane: drm plane 3337 * @state: plane state object to release 3338 * 3339 * Default plane state destroy hook for drivers which don't have their own 3340 * subclassed plane state structure. 3341 */ 3342 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, 3343 struct drm_plane_state *state) 3344 { 3345 __drm_atomic_helper_plane_destroy_state(state); 3346 kfree(state); 3347 } 3348 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); 3349 3350 /** 3351 * __drm_atomic_helper_connector_reset - reset state on connector 3352 * @connector: drm connector 3353 * @conn_state: connector state to assign 3354 * 3355 * Initializes the newly allocated @conn_state and assigns it to 3356 * the &drm_conector->state pointer of @connector, usually required when 3357 * initializing the drivers or when called from the &drm_connector_funcs.reset 3358 * hook. 3359 * 3360 * This is useful for drivers that subclass the connector state. 3361 */ 3362 void 3363 __drm_atomic_helper_connector_reset(struct drm_connector *connector, 3364 struct drm_connector_state *conn_state) 3365 { 3366 if (conn_state) 3367 conn_state->connector = connector; 3368 3369 connector->state = conn_state; 3370 } 3371 EXPORT_SYMBOL(__drm_atomic_helper_connector_reset); 3372 3373 /** 3374 * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors 3375 * @connector: drm connector 3376 * 3377 * Resets the atomic state for @connector by freeing the state pointer (which 3378 * might be NULL, e.g. at driver load time) and allocating a new empty state 3379 * object. 3380 */ 3381 void drm_atomic_helper_connector_reset(struct drm_connector *connector) 3382 { 3383 struct drm_connector_state *conn_state = 3384 kzalloc(sizeof(*conn_state), GFP_KERNEL); 3385 3386 if (connector->state) 3387 __drm_atomic_helper_connector_destroy_state(connector->state); 3388 3389 kfree(connector->state); 3390 __drm_atomic_helper_connector_reset(connector, conn_state); 3391 } 3392 EXPORT_SYMBOL(drm_atomic_helper_connector_reset); 3393 3394 /** 3395 * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state 3396 * @connector: connector object 3397 * @state: atomic connector state 3398 * 3399 * Copies atomic state from a connector's current state. This is useful for 3400 * drivers that subclass the connector state. 
3401 */ 3402 void 3403 __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, 3404 struct drm_connector_state *state) 3405 { 3406 memcpy(state, connector->state, sizeof(*state)); 3407 if (state->crtc) 3408 drm_connector_get(connector); 3409 } 3410 EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state); 3411 3412 /** 3413 * drm_atomic_helper_connector_duplicate_state - default state duplicate hook 3414 * @connector: drm connector 3415 * 3416 * Default connector state duplicate hook for drivers which don't have their own 3417 * subclassed connector state structure. 3418 */ 3419 struct drm_connector_state * 3420 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector) 3421 { 3422 struct drm_connector_state *state; 3423 3424 if (WARN_ON(!connector->state)) 3425 return NULL; 3426 3427 state = kmalloc(sizeof(*state), GFP_KERNEL); 3428 if (state) 3429 __drm_atomic_helper_connector_duplicate_state(connector, state); 3430 3431 return state; 3432 } 3433 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); 3434 3435 /** 3436 * drm_atomic_helper_duplicate_state - duplicate an atomic state object 3437 * @dev: DRM device 3438 * @ctx: lock acquisition context 3439 * 3440 * Makes a copy of the current atomic state by looping over all objects and 3441 * duplicating their respective states. This is used for example by suspend/ 3442 * resume support code to save the state prior to suspend such that it can 3443 * be restored upon resume. 3444 * 3445 * Note that this treats atomic state as persistent between save and restore. 3446 * Drivers must make sure that this is possible and won't result in confusion 3447 * or erroneous behaviour. 3448 * 3449 * Note that if callers haven't already acquired all modeset locks this might 3450 * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). 3451 * 3452 * Returns: 3453 * A pointer to the copy of the atomic state object on success or an 3454 * ERR_PTR()-encoded error code on failure. 
/**
 * drm_atomic_helper_duplicate_state - duplicate an atomic state object
 * @dev: DRM device
 * @ctx: lock acquisition context
 *
 * Makes a copy of the current atomic state by looping over all objects and
 * duplicating their respective states. This is used for example by suspend/
 * resume support code to save the state prior to suspend such that it can
 * be restored upon resume.
 *
 * Note that this treats atomic state as persistent between save and restore.
 * Drivers must make sure that this is possible and won't result in confusion
 * or erroneous behaviour.
 *
 * Note that if callers haven't already acquired all modeset locks this might
 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
 *
 * Returns:
 * A pointer to the copy of the atomic state object on success or an
 * ERR_PTR()-encoded error code on failure.
 *
 * See also:
 * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
 */
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int err = 0;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->acquire_ctx = ctx;

	drm_for_each_crtc(crtc, dev) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			goto free;
		}
	}

	drm_for_each_plane(plane, dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			err = PTR_ERR(plane_state);
			goto free;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			drm_connector_list_iter_end(&conn_iter);
			goto free;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* clear the acquire context so that it isn't accidentally reused */
	state->acquire_ctx = NULL;

free:
	if (err < 0) {
		drm_atomic_state_put(state);
		state = ERR_PTR(err);
	}

	return state;
}
EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);

/**
 * __drm_atomic_helper_connector_destroy_state - release connector state
 * @state: connector state object to release
 *
 * Releases all resources stored in the connector state without actually
 * freeing the memory of the connector state. This is useful for drivers that
 * subclass the connector state.
 */
void
__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
	if (state->crtc)
		drm_connector_put(state->connector);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);

/**
 * drm_atomic_helper_connector_destroy_state - default state destroy hook
 * @connector: drm connector
 * @state: connector state object to release
 *
 * Default connector state destroy hook for drivers which don't have their own
 * subclassed connector state structure.
 */
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
					       struct drm_connector_state *state)
{
	__drm_atomic_helper_connector_destroy_state(state);
	kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
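/*
 * Example, not part of the helper library: a driver suspend path could use
 * drm_atomic_helper_duplicate_state() directly instead of the
 * drm_atomic_helper_suspend()/drm_atomic_helper_resume() wrappers. The
 * foo_capture_suspend_state() name is hypothetical; the -EDEADLK backoff
 * dance follows the note in the kerneldoc above.
 */
static struct drm_atomic_state *
foo_capture_suspend_state(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int err;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	err = drm_modeset_lock_all_ctx(dev, &ctx);
	if (err < 0) {
		state = ERR_PTR(err);
		goto unlock;
	}

	/* duplicate the current state so it can be restored on resume */
	state = drm_atomic_helper_duplicate_state(dev, &ctx);

unlock:
	if (PTR_ERR(state) == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return state;
}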
/**
 * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
 * @crtc: CRTC object
 * @red: red correction table
 * @green: green correction table
 * @blue: blue correction table
 * @size: size of the tables
 * @ctx: lock acquire context
 *
 * Implements support for the legacy gamma correction table for drivers
 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
 * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for
 * how the atomic color management and gamma tables work.
 */
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
				       u16 *red, u16 *green, u16 *blue,
				       uint32_t size,
				       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	struct drm_property_blob *blob = NULL;
	struct drm_color_lut *blob_data;
	int i, ret = 0;
	bool replaced;

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state)
		return -ENOMEM;

	blob = drm_property_create_blob(dev,
					sizeof(struct drm_color_lut) * size,
					NULL);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		blob = NULL;
		goto fail;
	}

	/* Prepare GAMMA_LUT with the legacy values. */
	blob_data = (struct drm_color_lut *) blob->data;
	for (i = 0; i < size; i++) {
		blob_data[i].red = red[i];
		blob_data[i].green = green[i];
		blob_data[i].blue = blue[i];
	}

	state->acquire_ctx = ctx;
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	/* Reset the DEGAMMA_LUT and CTM properties and set GAMMA_LUT. */
	replaced  = drm_property_replace_blob(&crtc_state->degamma_lut, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob);
	crtc_state->color_mgmt_changed |= replaced;

	ret = drm_atomic_commit(state);

fail:
	drm_atomic_state_put(state);
	drm_property_blob_put(blob);
	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);

/**
 * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
 * @obj: private object
 * @state: new private object state
 *
 * Copies atomic state from a private object's current state. This is useful
 * for drivers that subclass the private state.
 */
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
						     struct drm_private_state *state)
{
	memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
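/*
 * Example, not part of the helper library: a driver private state object
 * would typically pair __drm_atomic_helper_private_obj_duplicate_state()
 * with a matching destroy callback in its &struct drm_private_state_funcs.
 * struct foo_mux_state and the foo_*() names are hypothetical and only for
 * illustration.
 */
struct foo_mux_state {
	struct drm_private_state base;
	/* driver private member, e.g. which CRTC owns a shared encoder */
	unsigned int owner_crtc_mask;
};

#define to_foo_mux_state(s) container_of(s, struct foo_mux_state, base)

static struct drm_private_state *
foo_mux_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_mux_state *state;

	/* copy the private members, then the base state via the helper */
	state = kmemdup(to_foo_mux_state(obj->state), sizeof(*state),
			GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void foo_mux_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(to_foo_mux_state(state));
}

static const struct drm_private_state_funcs foo_mux_state_funcs = {
	.atomic_duplicate_state = foo_mux_duplicate_state,
	.atomic_destroy_state = foo_mux_destroy_state,
};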