/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
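 *
 * A minimal, illustrative sketch of how a caller might use this (the acquire
 * context @ctx and the surrounding error handling are assumed to exist in the
 * caller):
 *
 *	struct drm_atomic_state *state;
 *
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		return -ENOMEM;
 *
 *	state->acquire_ctx = &ctx;
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	...
 *	drm_atomic_state_put(state);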
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
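 *
 * A minimal, illustrative sketch of the usual backoff dance (error handling
 * other than -EDEADLK is omitted):
 *
 *	retry:
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(state->acquire_ctx);
 *		goto retry;
 *	}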
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
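 *
 * An illustrative sketch, assuming @crtc_state was obtained with
 * drm_atomic_get_crtc_state() and @mode is a kernel-internal mode owned by the
 * caller:
 *
 *	ret = drm_atomic_set_mode_for_crtc(crtc_state, &mode);
 *	if (ret)
 *		return ret;
 *	crtc_state->active = true;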
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 const struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	*replaced |= drm_property_replace_blob(blob, new_blob);
	drm_property_blob_put(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to look up
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crashing)
	 * when the display pipe is suspended. To avoid all that fun just
	 * reject updates that ask for events since that likely indicates a
	 * bug in the compositor's drawing loop. This is consistent with the
	 * vblank IOCTL and legacy page_flip IOCTL which also reject service
	 * on a disabled pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK))
			return -EINVAL;
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to look up
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
				 drm_get_format_name(state->fb->format->format,
						     &format_name));
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb) {
		struct drm_framebuffer *fb = state->fb;
		int i, n = fb->format->num_planes;
		struct drm_format_name_buf format_name;

		drm_printf(p, "\t\tformat=%s\n",
			   drm_get_format_name(fb->format->format, &format_name));
		drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier);
		drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height);
		drm_printf(p, "\t\tlayers:\n");
		for (i = 0; i < n; i++) {
			drm_printf(p, "\t\t\tpitch[%d]=%u\n", i, fb->pitches[i]);
			drm_printf(p, "\t\t\toffset[%d]=%u\n", i, fb->offsets[i]);
		}
	}
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
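 *
 * An illustrative sketch of typical driver usage, where the driver object
 * embeds a &drm_private_obj as "base" and to_my_state() is a hypothetical
 * container_of() wrapper:
 *
 *	struct drm_private_state *priv;
 *
 *	priv = drm_atomic_get_private_obj_state(state, &my_obj->base);
 *	if (IS_ERR(priv))
 *		return PTR_ERR(priv);
 *	my_state = to_my_state(priv);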
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us.  Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, the driver
		 * silently rejects it and returns 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to look up
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (property == config->aspect_ratio_property) {
		*val = state->picture_aspect_ratio;
	} else if (property == connector->scaling_mode_property) {
		*val = state->scaling_mode;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

int drm_atomic_get_property(struct drm_mode_object *obj,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
				 fb->base.id, plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
				 plane_state);

	drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);

/**
 * drm_atomic_set_fence_for_plane - set fence for plane
 * @plane_state: atomic state object for the plane
 * @fence: dma_fence to use for the plane
 *
 * Helper to set up the plane_state fence in case it is not set yet.
 * By using this, drivers don't need to worry whether the user chose
 * implicit or explicit fencing.
 *
 * This function will not set the fence to the state if it was set
 * via explicit fencing interfaces on the atomic ioctl. In that case it will
 * drop the reference to the fence as we are not storing it anywhere.
 * Otherwise, if &drm_plane_state.fence is not set, this function will set it
 * with the received implicit fence. In both cases this function consumes a
 * reference for @fence.
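 *
 * An illustrative sketch of a driver's prepare_fb hook picking up the implicit
 * fence, assuming a GEM object with a reservation object at obj->resv:
 *
 *	fence = reservation_object_get_excl_rcu(obj->resv);
 *	drm_atomic_set_fence_for_plane(plane_state, fence);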
 */
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
			       struct dma_fence *fence)
{
	if (plane_state->fence) {
		dma_fence_put(fence);
		return;
	}

	plane_state->fence = fence;
}
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);

/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~(1 << drm_connector_index(conn_state->connector));

		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			1 << drm_connector_index(conn_state->connector);

		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc. Hence
 * drivers and helpers should only call this when really needed (e.g. when a
 * full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
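 *
 * An illustrative sketch of when a driver's atomic_check might call this:
 *
 *	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 *		ret = drm_atomic_add_affected_connectors(state, crtc);
 *		if (ret)
 *			return ret;
 *	}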
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector))))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
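 *
 * An illustrative sketch of validating a configuration without committing it
 * (as the TEST_ONLY flag of the atomic ioctl does):
 *
 *	ret = drm_atomic_check_only(state);
 *	if (ret == -EDEADLK)
 *		goto backoff;
 *	if (ret)
 *		goto out;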
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check)
		ret = config->funcs->atomic_check(state->dev, state);

	if (ret)
		return ret;

	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
1687 */ 1688 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) 1689 { 1690 struct drm_mode_config *config = &state->dev->mode_config; 1691 int ret; 1692 1693 ret = drm_atomic_check_only(state); 1694 if (ret) 1695 return ret; 1696 1697 DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state); 1698 1699 return config->funcs->atomic_commit(state->dev, state, true); 1700 } 1701 EXPORT_SYMBOL(drm_atomic_nonblocking_commit); 1702 1703 static void drm_atomic_print_state(const struct drm_atomic_state *state) 1704 { 1705 struct drm_printer p = drm_info_printer(state->dev->dev); 1706 struct drm_plane *plane; 1707 struct drm_plane_state *plane_state; 1708 struct drm_crtc *crtc; 1709 struct drm_crtc_state *crtc_state; 1710 struct drm_connector *connector; 1711 struct drm_connector_state *connector_state; 1712 int i; 1713 1714 DRM_DEBUG_ATOMIC("printing %p\n", state); 1715 1716 for_each_new_plane_in_state(state, plane, plane_state, i) 1717 drm_atomic_plane_print_state(&p, plane_state); 1718 1719 for_each_new_crtc_in_state(state, crtc, crtc_state, i) 1720 drm_atomic_crtc_print_state(&p, crtc_state); 1721 1722 for_each_new_connector_in_state(state, connector, connector_state, i) 1723 drm_atomic_connector_print_state(&p, connector_state); 1724 } 1725 1726 static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, 1727 bool take_locks) 1728 { 1729 struct drm_mode_config *config = &dev->mode_config; 1730 struct drm_plane *plane; 1731 struct drm_crtc *crtc; 1732 struct drm_connector *connector; 1733 struct drm_connector_list_iter conn_iter; 1734 1735 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 1736 return; 1737 1738 list_for_each_entry(plane, &config->plane_list, head) { 1739 if (take_locks) 1740 drm_modeset_lock(&plane->mutex, NULL); 1741 drm_atomic_plane_print_state(p, plane->state); 1742 if (take_locks) 1743 drm_modeset_unlock(&plane->mutex); 1744 } 1745 1746 list_for_each_entry(crtc, &config->crtc_list, head) { 1747 if (take_locks) 1748 drm_modeset_lock(&crtc->mutex, NULL); 1749 drm_atomic_crtc_print_state(p, crtc->state); 1750 if (take_locks) 1751 drm_modeset_unlock(&crtc->mutex); 1752 } 1753 1754 drm_connector_list_iter_begin(dev, &conn_iter); 1755 if (take_locks) 1756 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1757 drm_for_each_connector_iter(connector, &conn_iter) 1758 drm_atomic_connector_print_state(p, connector->state); 1759 if (take_locks) 1760 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1761 drm_connector_list_iter_end(&conn_iter); 1762 } 1763 1764 /** 1765 * drm_state_dump - dump entire device atomic state 1766 * @dev: the drm device 1767 * @p: where to print the state to 1768 * 1769 * Just for debugging. Drivers might want an option to dump state 1770 * to dmesg in case of error IRQs. (Hint: you probably want to 1771 * ratelimit this!) 1772 * 1773 * The caller must hold all modeset locks (drm_modeset_lock_all()); 1774 * alternatively, if this is called from an error IRQ handler, it should not 1775 * be enabled by default. (I.e. if you are debugging errors you might not 1776 * care that this is racy. But calling this without all modeset locks held is 1777 * not inherently safe.)
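 *
 * A hedged sketch of ratelimited use from an error handler; the choice of
 * printer and ratelimit mechanism here is illustrative, not something this
 * function requires:
 *
 *	if (printk_ratelimit()) {
 *		struct drm_printer p = drm_info_printer(dev->dev);
 *
 *		drm_state_dump(dev, &p);
 *	}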
1778 */ 1779 void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 1780 { 1781 __drm_state_dump(dev, p, false); 1782 } 1783 EXPORT_SYMBOL(drm_state_dump); 1784 1785 #ifdef CONFIG_DEBUG_FS 1786 static int drm_state_info(struct seq_file *m, void *data) 1787 { 1788 struct drm_info_node *node = (struct drm_info_node *) m->private; 1789 struct drm_device *dev = node->minor->dev; 1790 struct drm_printer p = drm_seq_file_printer(m); 1791 1792 __drm_state_dump(dev, &p, true); 1793 1794 return 0; 1795 } 1796 1797 /* any use in debugfs files to dump individual planes/crtc/etc? */ 1798 static const struct drm_info_list drm_atomic_debugfs_list[] = { 1799 {"state", drm_state_info, 0}, 1800 }; 1801 1802 int drm_atomic_debugfs_init(struct drm_minor *minor) 1803 { 1804 return drm_debugfs_create_files(drm_atomic_debugfs_list, 1805 ARRAY_SIZE(drm_atomic_debugfs_list), 1806 minor->debugfs_root, minor); 1807 } 1808 #endif 1809 1810 /* 1811 * The big monster ioctl 1812 */ 1813 1814 static struct drm_pending_vblank_event *create_vblank_event( 1815 struct drm_crtc *crtc, uint64_t user_data) 1816 { 1817 struct drm_pending_vblank_event *e = NULL; 1818 1819 e = kzalloc(sizeof *e, GFP_KERNEL); 1820 if (!e) 1821 return NULL; 1822 1823 e->event.base.type = DRM_EVENT_FLIP_COMPLETE; 1824 e->event.base.length = sizeof(e->event); 1825 e->event.vbl.crtc_id = crtc->base.id; 1826 e->event.vbl.user_data = user_data; 1827 1828 return e; 1829 } 1830 1831 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, 1832 struct drm_connector *connector, 1833 int mode) 1834 { 1835 struct drm_connector *tmp_connector; 1836 struct drm_connector_state *new_conn_state; 1837 struct drm_crtc *crtc; 1838 struct drm_crtc_state *crtc_state; 1839 int i, ret, old_mode = connector->dpms; 1840 bool active = false; 1841 1842 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, 1843 state->acquire_ctx); 1844 if (ret) 1845 return ret; 1846 1847 if (mode != DRM_MODE_DPMS_ON) 1848 mode = DRM_MODE_DPMS_OFF; 1849 connector->dpms = mode; 1850 1851 crtc = connector->state->crtc; 1852 if (!crtc) 1853 goto out; 1854 ret = drm_atomic_add_affected_connectors(state, crtc); 1855 if (ret) 1856 goto out; 1857 1858 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1859 if (IS_ERR(crtc_state)) { 1860 ret = PTR_ERR(crtc_state); 1861 goto out; 1862 } 1863 1864 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { 1865 if (new_conn_state->crtc != crtc) 1866 continue; 1867 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { 1868 active = true; 1869 break; 1870 } 1871 } 1872 1873 crtc_state->active = active; 1874 ret = drm_atomic_commit(state); 1875 out: 1876 if (ret != 0) 1877 connector->dpms = old_mode; 1878 return ret; 1879 } 1880 1881 int drm_atomic_set_property(struct drm_atomic_state *state, 1882 struct drm_mode_object *obj, 1883 struct drm_property *prop, 1884 uint64_t prop_value) 1885 { 1886 struct drm_mode_object *ref; 1887 int ret; 1888 1889 if (!drm_property_change_valid_get(prop, prop_value, &ref)) 1890 return -EINVAL; 1891 1892 switch (obj->type) { 1893 case DRM_MODE_OBJECT_CONNECTOR: { 1894 struct drm_connector *connector = obj_to_connector(obj); 1895 struct drm_connector_state *connector_state; 1896 1897 connector_state = drm_atomic_get_connector_state(state, connector); 1898 if (IS_ERR(connector_state)) { 1899 ret = PTR_ERR(connector_state); 1900 break; 1901 } 1902 1903 ret = drm_atomic_connector_set_property(connector, 1904 connector_state, prop, prop_value); 1905 break; 1906 } 1907 case
DRM_MODE_OBJECT_CRTC: { 1908 struct drm_crtc *crtc = obj_to_crtc(obj); 1909 struct drm_crtc_state *crtc_state; 1910 1911 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1912 if (IS_ERR(crtc_state)) { 1913 ret = PTR_ERR(crtc_state); 1914 break; 1915 } 1916 1917 ret = drm_atomic_crtc_set_property(crtc, 1918 crtc_state, prop, prop_value); 1919 break; 1920 } 1921 case DRM_MODE_OBJECT_PLANE: { 1922 struct drm_plane *plane = obj_to_plane(obj); 1923 struct drm_plane_state *plane_state; 1924 1925 plane_state = drm_atomic_get_plane_state(state, plane); 1926 if (IS_ERR(plane_state)) { 1927 ret = PTR_ERR(plane_state); 1928 break; 1929 } 1930 1931 ret = drm_atomic_plane_set_property(plane, 1932 plane_state, prop, prop_value); 1933 break; 1934 } 1935 default: 1936 ret = -EINVAL; 1937 break; 1938 } 1939 1940 drm_property_change_valid_put(prop, ref); 1941 return ret; 1942 } 1943 1944 /** 1945 * drm_atomic_clean_old_fb - unset old_fb pointers and set plane->fb pointers. 1946 * 1947 * @dev: drm device to check. 1948 * @plane_mask: plane mask for planes that were updated. 1949 * @ret: return value, can be -EDEADLK for a retry. 1950 * 1951 * Before doing an update, &drm_plane.old_fb is set to &drm_plane.fb, but before 1952 * dropping the locks old_fb needs to be set to NULL and plane->fb updated. This 1953 * is a common operation for each atomic update, so this call is split off as a 1954 * helper. 1955 */ 1956 void drm_atomic_clean_old_fb(struct drm_device *dev, 1957 unsigned plane_mask, 1958 int ret) 1959 { 1960 struct drm_plane *plane; 1961 1962 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping 1963 * locks (i.e. while it is still safe to deref plane->state). We 1964 * need to do this here because the driver entry points cannot 1965 * distinguish between legacy and atomic ioctls. 1966 */ 1967 drm_for_each_plane_mask(plane, dev, plane_mask) { 1968 if (ret == 0) { 1969 struct drm_framebuffer *new_fb = plane->state->fb; 1970 if (new_fb) 1971 drm_framebuffer_get(new_fb); 1972 plane->fb = new_fb; 1973 plane->crtc = plane->state->crtc; 1974 1975 if (plane->old_fb) 1976 drm_framebuffer_put(plane->old_fb); 1977 } 1978 plane->old_fb = NULL; 1979 } 1980 } 1981 EXPORT_SYMBOL(drm_atomic_clean_old_fb); 1982 1983 /** 1984 * DOC: explicit fencing properties 1985 * 1986 * Explicit fencing allows userspace to control the buffer synchronization 1987 * between devices. A fence or a group of fences is transferred to/from 1988 * userspace using Sync File fds and there are two DRM properties for that. 1989 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and 1990 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel. 1991 * 1992 * By contrast, with implicit fencing the kernel keeps track of any 1993 * ongoing rendering, and automatically ensures that the atomic update waits 1994 * for any pending rendering to complete. For shared buffers represented with 1995 * a &struct dma_buf this is tracked in &struct reservation_object. 1996 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), 1997 * whereas explicit fencing is what Android wants. 1998 * 1999 * "IN_FENCE_FD": 2000 * Use this property to pass a fence that DRM should wait on before 2001 * proceeding with the Atomic Commit request and show the framebuffer for 2002 * the plane on the screen. The fence can be either a normal fence or a 2003 * merged one; the sync_file framework will handle both cases and use a 2004 * fence_array if a merged fence is received.
Passing -1 here means no 2005 * fences to wait on. 2006 * 2007 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag, 2008 * it will only check if the Sync File is a valid one. 2009 * 2010 * On the driver side the fence is stored on the @fence parameter of 2011 * &struct drm_plane_state. Drivers which also support implicit fencing 2012 * should set the implicit fence using drm_atomic_set_fence_for_plane(), 2013 * to make sure there's consistent behaviour between drivers in precedence 2014 * of implicit vs. explicit fencing. 2015 * 2016 * "OUT_FENCE_PTR": 2017 * Use this property to pass a file descriptor pointer to DRM. Once the 2018 * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with 2019 * the file descriptor number of a Sync File. This Sync File contains the 2020 * CRTC fence that will be signaled when all framebuffers present on the 2021 * Atomic Commit request for that given CRTC are scanned out on the 2022 * screen. 2023 * 2024 * The Atomic Commit request fails if an invalid pointer is passed. If the 2025 * Atomic Commit request fails for any other reason, the out fence fd 2026 * returned will be -1. On an Atomic Commit with the 2027 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. 2028 * 2029 * Note that out-fences don't have a special interface to drivers and are 2030 * internally represented by a &struct drm_pending_vblank_event in 2031 * &struct drm_crtc_state, which is also used by the nonblocking atomic commit 2032 * helpers and for the DRM event handling for existing userspace. An illustrative userspace sketch showing both properties in use is appended at the end of this file. 2033 */ 2034 2035 struct drm_out_fence_state { 2036 s32 __user *out_fence_ptr; 2037 struct sync_file *sync_file; 2038 int fd; 2039 }; 2040 2041 static int setup_out_fence(struct drm_out_fence_state *fence_state, 2042 struct dma_fence *fence) 2043 { 2044 fence_state->fd = get_unused_fd_flags(O_CLOEXEC); 2045 if (fence_state->fd < 0) 2046 return fence_state->fd; 2047 2048 if (put_user(fence_state->fd, fence_state->out_fence_ptr)) 2049 return -EFAULT; 2050 2051 fence_state->sync_file = sync_file_create(fence); 2052 if (!fence_state->sync_file) 2053 return -ENOMEM; 2054 2055 return 0; 2056 } 2057 2058 static int prepare_crtc_signaling(struct drm_device *dev, 2059 struct drm_atomic_state *state, 2060 struct drm_mode_atomic *arg, 2061 struct drm_file *file_priv, 2062 struct drm_out_fence_state **fence_state, 2063 unsigned int *num_fences) 2064 { 2065 struct drm_crtc *crtc; 2066 struct drm_crtc_state *crtc_state; 2067 int i, c = 0, ret; 2068 2069 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) 2070 return 0; 2071 2072 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2073 s32 __user *fence_ptr; 2074 2075 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 2076 2077 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { 2078 struct drm_pending_vblank_event *e; 2079 2080 e = create_vblank_event(crtc, arg->user_data); 2081 if (!e) 2082 return -ENOMEM; 2083 2084 crtc_state->event = e; 2085 } 2086 2087 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 2088 struct drm_pending_vblank_event *e = crtc_state->event; 2089 2090 if (!file_priv) 2091 continue; 2092 2093 ret = drm_event_reserve_init(dev, file_priv, &e->base, 2094 &e->event.base); 2095 if (ret) { 2096 kfree(e); 2097 crtc_state->event = NULL; 2098 return ret; 2099 } 2100 } 2101 2102 if (fence_ptr) { 2103 struct dma_fence *fence; 2104 struct drm_out_fence_state *f; 2105 2106 f = krealloc(*fence_state, sizeof(**fence_state) * 2107 (*num_fences + 1), GFP_KERNEL); 2108 if (!f) 2109 return -ENOMEM;
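			/*
			 * Zero-initialise the slot that krealloc() just
			 * appended: only out_fence_ptr is filled in here, the
			 * fd and sync_file members are set up later by
			 * setup_out_fence().
			 */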
2110 2111 memset(&f[*num_fences], 0, sizeof(*f)); 2112 2113 f[*num_fences].out_fence_ptr = fence_ptr; 2114 *fence_state = f; 2115 2116 fence = drm_crtc_create_fence(crtc); 2117 if (!fence) 2118 return -ENOMEM; 2119 2120 ret = setup_out_fence(&f[(*num_fences)++], fence); 2121 if (ret) { 2122 dma_fence_put(fence); 2123 return ret; 2124 } 2125 2126 crtc_state->event->base.fence = fence; 2127 } 2128 2129 c++; 2130 } 2131 2132 /* 2133 * If this flag is set, userspace is waiting for an event that will never 2134 * be delivered, since there is not even one CRTC in the state to signal it. 2135 */ 2136 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2137 return -EINVAL; 2138 2139 return 0; 2140 } 2141 2142 static void complete_crtc_signaling(struct drm_device *dev, 2143 struct drm_atomic_state *state, 2144 struct drm_out_fence_state *fence_state, 2145 unsigned int num_fences, 2146 bool install_fds) 2147 { 2148 struct drm_crtc *crtc; 2149 struct drm_crtc_state *crtc_state; 2150 int i; 2151 2152 if (install_fds) { 2153 for (i = 0; i < num_fences; i++) 2154 fd_install(fence_state[i].fd, 2155 fence_state[i].sync_file->file); 2156 2157 kfree(fence_state); 2158 return; 2159 } 2160 2161 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2162 struct drm_pending_vblank_event *event = crtc_state->event; 2163 /* 2164 * Free the allocated event. drm_atomic_helper_setup_commit 2165 * can allocate an event too, so only free it if it's ours 2166 * to prevent a double free in drm_atomic_state_clear. 2167 */ 2168 if (event && (event->base.fence || event->base.file_priv)) { 2169 drm_event_cancel_free(dev, &event->base); 2170 crtc_state->event = NULL; 2171 } 2172 } 2173 2174 if (!fence_state) 2175 return; 2176 2177 for (i = 0; i < num_fences; i++) { 2178 if (fence_state[i].sync_file) 2179 fput(fence_state[i].sync_file->file); 2180 if (fence_state[i].fd >= 0) 2181 put_unused_fd(fence_state[i].fd); 2182 2183 /* If writing -1 back to the user's out_fence_ptr fails, just log it */ 2184 if (fence_state[i].out_fence_ptr && 2185 put_user(-1, fence_state[i].out_fence_ptr)) 2186 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); 2187 } 2188 2189 kfree(fence_state); 2190 } 2191 2192 int drm_mode_atomic_ioctl(struct drm_device *dev, 2193 void *data, struct drm_file *file_priv) 2194 { 2195 struct drm_mode_atomic *arg = data; 2196 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); 2197 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); 2198 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); 2199 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); 2200 unsigned int copied_objs, copied_props; 2201 struct drm_atomic_state *state; 2202 struct drm_modeset_acquire_ctx ctx; 2203 struct drm_plane *plane; 2204 struct drm_out_fence_state *fence_state; 2205 unsigned plane_mask; 2206 int ret = 0; 2207 unsigned int i, j, num_fences; 2208 2209 /* disallow for drivers not supporting atomic: */ 2210 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2211 return -EINVAL; 2212 2213 /* disallow for userspace that has not enabled atomic cap (even 2214 * though this may be a bit overkill, since legacy userspace 2215 * wouldn't know how to call this ioctl) 2216 */ 2217 if (!file_priv->atomic) 2218 return -EINVAL; 2219 2220 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) 2221 return -EINVAL; 2222 2223 if (arg->reserved) 2224 return -EINVAL; 2225 2226 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && 2227 !dev->mode_config.async_page_flip) 2228 return -EINVAL; 2229
2230 /* can't test and expect an event at the same time. */ 2231 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && 2232 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2233 return -EINVAL; 2234 2235 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2236 2237 state = drm_atomic_state_alloc(dev); 2238 if (!state) 2239 return -ENOMEM; 2240 2241 state->acquire_ctx = &ctx; 2242 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 2243 2244 retry: 2245 plane_mask = 0; 2246 copied_objs = 0; 2247 copied_props = 0; 2248 fence_state = NULL; 2249 num_fences = 0; 2250 2251 for (i = 0; i < arg->count_objs; i++) { 2252 uint32_t obj_id, count_props; 2253 struct drm_mode_object *obj; 2254 2255 if (get_user(obj_id, objs_ptr + copied_objs)) { 2256 ret = -EFAULT; 2257 goto out; 2258 } 2259 2260 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); 2261 if (!obj) { 2262 ret = -ENOENT; 2263 goto out; 2264 } 2265 2266 if (!obj->properties) { 2267 drm_mode_object_put(obj); 2268 ret = -ENOENT; 2269 goto out; 2270 } 2271 2272 if (get_user(count_props, count_props_ptr + copied_objs)) { 2273 drm_mode_object_put(obj); 2274 ret = -EFAULT; 2275 goto out; 2276 } 2277 2278 copied_objs++; 2279 2280 for (j = 0; j < count_props; j++) { 2281 uint32_t prop_id; 2282 uint64_t prop_value; 2283 struct drm_property *prop; 2284 2285 if (get_user(prop_id, props_ptr + copied_props)) { 2286 drm_mode_object_put(obj); 2287 ret = -EFAULT; 2288 goto out; 2289 } 2290 2291 prop = drm_mode_obj_find_prop_id(obj, prop_id); 2292 if (!prop) { 2293 drm_mode_object_put(obj); 2294 ret = -ENOENT; 2295 goto out; 2296 } 2297 2298 if (copy_from_user(&prop_value, 2299 prop_values_ptr + copied_props, 2300 sizeof(prop_value))) { 2301 drm_mode_object_put(obj); 2302 ret = -EFAULT; 2303 goto out; 2304 } 2305 2306 ret = drm_atomic_set_property(state, obj, prop, 2307 prop_value); 2308 if (ret) { 2309 drm_mode_object_put(obj); 2310 goto out; 2311 } 2312 2313 copied_props++; 2314 } 2315 2316 if (obj->type == DRM_MODE_OBJECT_PLANE && count_props && 2317 !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) { 2318 plane = obj_to_plane(obj); 2319 plane_mask |= (1 << drm_plane_index(plane)); 2320 plane->old_fb = plane->fb; 2321 } 2322 drm_mode_object_put(obj); 2323 } 2324 2325 ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state, 2326 &num_fences); 2327 if (ret) 2328 goto out; 2329 2330 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { 2331 ret = drm_atomic_check_only(state); 2332 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 2333 ret = drm_atomic_nonblocking_commit(state); 2334 } else { 2335 if (unlikely(drm_debug & DRM_UT_STATE)) 2336 drm_atomic_print_state(state); 2337 2338 ret = drm_atomic_commit(state); 2339 } 2340 2341 out: 2342 drm_atomic_clean_old_fb(dev, plane_mask, ret); 2343 2344 complete_crtc_signaling(dev, state, fence_state, num_fences, !ret); 2345 2346 if (ret == -EDEADLK) { 2347 drm_atomic_state_clear(state); 2348 ret = drm_modeset_backoff(&ctx); 2349 if (!ret) 2350 goto retry; 2351 } 2352 2353 drm_atomic_state_put(state); 2354 2355 drm_modeset_drop_locks(&ctx); 2356 drm_modeset_acquire_fini(&ctx); 2357 2358 return ret; 2359 } 2360
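/*
 * Illustrative userspace sketch for the explicit fencing properties
 * documented in the "explicit fencing properties" DOC section above. This is
 * not kernel code: it uses the libdrm atomic helpers, and drm_fd, plane_id,
 * crtc_id, render_fence_fd as well as the two property IDs are assumed to
 * have been looked up by the caller (e.g. via drmModeObjectGetProperties()):
 *
 *	int32_t out_fence_fd = -1;
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop_id,
 *				 render_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *	drmModeAtomicCommit(drm_fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 *
 * On success out_fence_fd holds a sync_file fd that signals once the new
 * framebuffers on that CRTC have been scanned out; it can be waited on with
 * poll(2) or handed to another driver as an in-fence.
 */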