/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */


#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

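/*
 * Illustrative sketch only, not part of the core API: this is roughly how a
 * caller drives the deadlock backoff that drm_atomic_state_clear() below
 * exists for. All names local to this example are hypothetical.
 */
static inline int example_toggle_crtc_active(struct drm_crtc *crtc, bool active)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}
	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}
	crtc_state->active = active;

	ret = drm_atomic_commit(state);
out:
	if (ret == -EDEADLK) {
		/* ww mutex deadlock: throw away cached state, drop locks, retry */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_atomic_state_put(state);
out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
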
/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks, so someone else could sneak in and change the current modeset
 * configuration. This means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state, which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
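 *
 * An illustrative calling sketch (hypothetical driver code in a
 * &drm_mode_config_funcs.atomic_check implementation); returning PTR_ERR()
 * lets an -EDEADLK propagate out to the caller's backoff loop:
 *
 *	struct drm_crtc_state *crtc_state;
 *
 *	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);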
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

static void set_out_fence_for_crtc(struct drm_atomic_state *state,
				   struct drm_crtc *crtc, s32 __user *fence_ptr)
{
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	s32 __user *fence_ptr;

	fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
	state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;

	return fence_ptr;
}

static int set_out_fence_for_connector(struct drm_atomic_state *state,
				       struct drm_connector *connector,
				       s32 __user *fence_ptr)
{
	unsigned int index = drm_connector_index(connector);

	if (!fence_ptr)
		return 0;

	if (put_user(-1, fence_ptr))
		return -EFAULT;

	state->connectors[index].out_fence_ptr = fence_ptr;

	return 0;
}

static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state,
					       struct drm_connector *connector)
{
	unsigned int index = drm_connector_index(connector);
	s32 __user *fence_ptr;

	fence_ptr = state->connectors[index].out_fence_ptr;
	state->connectors[index].out_fence_ptr = NULL;

	return fence_ptr;
}

/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state and update
 * the enable property.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 const struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = state->crtc;
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
				 mode->name, crtc->base.id, crtc->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
				 crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);

/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	struct drm_crtc *crtc = state->crtc;

	if (blob == state->mode_blob)
		return 0;

	drm_property_blob_put(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		int ret;

		if (blob->length != sizeof(struct drm_mode_modeinfo)) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] bad mode blob length: %zu\n",
					 crtc->base.id, crtc->name,
					 blob->length);
			return -EINVAL;
		}

		ret = drm_mode_convert_umode(crtc->dev,
					     &state->mode, blob->data);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] invalid mode (ret=%d, status=%s):\n",
					 crtc->base.id, crtc->name,
					 ret, drm_get_mode_status_name(state->mode.status));
			drm_mode_debug_printmodeline(&state->mode);
			return -EINVAL;
		}

		state->mode_blob = drm_property_blob_get(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
				 state->mode.name, crtc->base.id, crtc->name,
				 state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for [CRTC:%d:%s] state %p\n",
				 crtc->base.id, crtc->name, state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

/**
 * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
 * @dev: DRM device
 * @blob: a pointer to the member blob to be replaced
 * @blob_id: ID of the new blob
 * @expected_size: total expected size of the blob data (in bytes)
 * @expected_elem_size: expected element size of the blob data (in bytes)
 * @replaced: did the blob get replaced?
 *
 * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero
 * @blob becomes NULL.
 *
 * If @expected_size is positive the new blob length is expected to be equal
 * to @expected_size bytes. If @expected_elem_size is positive the new blob
 * length is expected to be a multiple of @expected_elem_size bytes. Otherwise
 * an error is returned.
 *
 * @replaced will indicate to the caller whether the blob was replaced or not.
 * If the old and new blobs were in fact the same blob @replaced will be false
 * otherwise it will be true.
 *
 * RETURNS:
 * Zero on success, error code on failure.
 */
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 ssize_t expected_elem_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 &&
		    new_blob->length != expected_size) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
		if (expected_elem_size > 0 &&
		    new_blob->length % expected_elem_size != 0) {
			drm_property_blob_put(new_blob);
			return -EINVAL;
		}
	}

	*replaced |= drm_property_replace_blob(blob, new_blob);
	drm_property_blob_put(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_blob_put(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->degamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm), -1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(dev,
					&state->gamma_lut,
					val,
					-1, sizeof(struct drm_color_lut),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->prop_out_fence_ptr) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		if (!fence_ptr)
			return 0;

		if (put_user(-1, fence_ptr))
			return -EFAULT;

		set_out_fence_for_crtc(state->state, crtc, fence_ptr);
	} else if (crtc->funcs->atomic_set_property) {
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] unknown property [PROP:%d:%s]\n",
				 crtc->base.id, crtc->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);

/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (property == config->prop_out_fence_ptr)
		*val = 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crashing) when
	 * the display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events, since that likely indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_check - check connector state
 * @connector: connector to check
 * @state: connector state to check
 *
 * Provides core sanity checks for connector state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_check(struct drm_connector *connector,
		struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_writeback_job *writeback_job = state->writeback_job;

	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
		return 0;

	if (writeback_job->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	if (state->crtc)
		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);

	if (writeback_job->fb && !crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
				 connector->base.id, connector->name,
				 state->crtc->base.id);
		return -EINVAL;
	}

	if (writeback_job->out_fence && !writeback_job->fb) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
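 *
 * An illustrative sketch (hypothetical driver code) of pulling a CRTC's primary
 * plane into an update; note that when the plane is already connected to a CRTC
 * this also acquires that CRTC's state:
 *
 *	struct drm_plane_state *plane_state;
 *
 *	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
 *	if (IS_ERR(plane_state))
 *		return PTR_ERR(plane_state);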
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
	} else if (property == config->prop_in_fence_fd) {
		if (state->fence)
			return -EINVAL;

		if (U642I64(val) == -1)
			return 0;

		state->fence = sync_file_get_fence(val);
		if (!state->fence)
			return -EINVAL;

	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == plane->alpha_property) {
		state->alpha = val;
	} else if (property == plane->blend_mode_property) {
		state->pixel_blend_mode = val;
	} else if (property == plane->rotation_property) {
		if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
					 plane->base.id, plane->name, val);
			return -EINVAL;
		}
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (property == plane->color_encoding_property) {
		state->color_encoding = val;
	} else if (property == plane->color_range_property) {
		state->color_range = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] unknown property [PROP:%d:%s]\n",
				 plane->base.id, plane->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get a property from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_in_fence_fd) {
		*val = -1;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == plane->alpha_property) {
		*val = state->alpha;
	} else if (property == plane->blend_mode_property) {
		*val = state->pixel_blend_mode;
	} else if (property == plane->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (property == plane->color_encoding_property) {
		*val = state->color_encoding;
	} else if (property == plane->color_range_property) {
		*val = state->color_range;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (state->crtc && !state->fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (state->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 state->crtc->base.id, state->crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
					   state->fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(state->fb->format->format,
						     &format_name),
				 state->fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
				 state->fb->width, state->fb->height);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * DOC: handling driver private state
 *
 * Very often the DRM objects exposed to userspace in the atomic modeset api
 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
 * underlying hardware. Especially for any kind of shared resources (e.g. shared
 * clocks, scaler units, bandwidth and fifo limits shared among a group of
 * planes or CRTCs, and so on) it makes sense to model these as independent
 * objects. Drivers then need to do similar state tracking and commit ordering for
 * such private (since not exposed to userspace) objects as the atomic core and
 * helpers already provide for connectors, planes and CRTCs.
 *
 * To make this easier on drivers the atomic core provides some support to track
 * driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). Since this function
 * does not take care of locking, drivers should wrap it for each type of
 * private state object they have with the required call to drm_modeset_lock()
 * for the corresponding &drm_modeset_lock.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
 * Drivers are recommended to wrap these for each type of driver private state
 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
 * least if they want to iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit" instead of "duplicate state, check, then either commit or release the
 * duplicated state"), it is deprecated in favour of using &drm_private_state.
 */

/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
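 *
 * An illustrative sketch of the per-driver wrapper recommended in the
 * "handling driver private state" section above; everything prefixed with foo_
 * is hypothetical driver code, not core API:
 *
 *	static struct foo_global_state *
 *	foo_get_global_state(struct drm_atomic_state *state, struct foo_device *foo)
 *	{
 *		struct drm_private_state *priv_state;
 *		int ret;
 *
 *		ret = drm_modeset_lock(&foo->global_lock, state->acquire_ctx);
 *		if (ret)
 *			return ERR_PTR(ret);
 *
 *		priv_state = drm_atomic_get_private_obj_state(state, &foo->global_obj);
 *		if (IS_ERR(priv_state))
 *			return ERR_CAST(priv_state);
 *
 *		return container_of(priv_state, struct foo_global_state, base);
 *	}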
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (property == config->tv_select_subconnector_property) {
		state->tv.subconnector = val;
	} else if (property == config->tv_left_margin_property) {
		state->tv.margins.left = val;
	} else if (property == config->tv_right_margin_property) {
		state->tv.margins.right = val;
	} else if (property == config->tv_top_margin_property) {
		state->tv.margins.top = val;
	} else if (property == config->tv_bottom_margin_property) {
		state->tv.margins.bottom = val;
	} else if (property == config->tv_mode_property) {
		state->tv.mode = val;
	} else if (property == config->tv_brightness_property) {
		state->tv.brightness = val;
	} else if (property == config->tv_contrast_property) {
		state->tv.contrast = val;
	} else if (property == config->tv_flicker_reduction_property) {
		state->tv.flicker_reduction = val;
	} else if (property == config->tv_overscan_property) {
		state->tv.overscan = val;
	} else if (property == config->tv_saturation_property) {
		state->tv.saturation = val;
	} else if (property == config->tv_hue_property) {
		state->tv.hue = val;
	} else if (property == config->link_status_property) {
		/* Never downgrade from GOOD to BAD on userspace's request here,
		 * only hw issues can do that.
		 *
		 * For an atomic property the userspace doesn't need to be able
		 * to understand all the properties, but needs to be able to
		 * restore the state it wants on VT switch. So if the userspace
		 * tries to change the link_status from GOOD to BAD, the driver
		 * silently rejects it and returns 0. This prevents userspace
		 * from accidentally breaking the display when it restores the
		 * state.
		 */
		if (state->link_status != DRM_LINK_STATUS_GOOD)
			state->link_status = val;
	} else if (property == config->aspect_ratio_property) {
		state->picture_aspect_ratio = val;
	} else if (property == config->content_type_property) {
		state->content_type = val;
	} else if (property == connector->scaling_mode_property) {
		state->scaling_mode = val;
	} else if (property == connector->content_protection_property) {
		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
			return -EINVAL;
		}
		state->content_protection = val;
	} else if (property == config->writeback_fb_id_property) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
		int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
		if (fb)
			drm_framebuffer_put(fb);
		return ret;
	} else if (property == config->writeback_out_fence_ptr_property) {
		s32 __user *fence_ptr = u64_to_user_ptr(val);

		return set_out_fence_for_connector(state->state, connector,
						   fence_ptr);
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n",
				 connector->base.id, connector->name,
				 property->base.id, property->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get a property from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to driver's
 * &drm_connector_funcs.atomic_get_property for driver properties. To ensure
 * consistent behavior you must call this function rather than the driver hook
 * directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (property == config->tv_select_subconnector_property) {
		*val = state->tv.subconnector;
	} else if (property == config->tv_left_margin_property) {
		*val = state->tv.margins.left;
	} else if (property == config->tv_right_margin_property) {
		*val = state->tv.margins.right;
	} else if (property == config->tv_top_margin_property) {
		*val = state->tv.margins.top;
	} else if (property == config->tv_bottom_margin_property) {
		*val = state->tv.margins.bottom;
	} else if (property == config->tv_mode_property) {
		*val = state->tv.mode;
	} else if (property == config->tv_brightness_property) {
		*val = state->tv.brightness;
	} else if (property == config->tv_contrast_property) {
		*val = state->tv.contrast;
	} else if (property == config->tv_flicker_reduction_property) {
		*val = state->tv.flicker_reduction;
	} else if (property == config->tv_overscan_property) {
		*val = state->tv.overscan;
	} else if (property == config->tv_saturation_property) {
		*val = state->tv.saturation;
	} else if (property == config->tv_hue_property) {
		*val = state->tv.hue;
	} else if (property == config->link_status_property) {
		*val = state->link_status;
	} else if (property == config->aspect_ratio_property) {
		*val = state->picture_aspect_ratio;
	} else if (property == config->content_type_property) {
		*val = state->content_type;
	} else if (property == connector->scaling_mode_property) {
		*val = state->scaling_mode;
	} else if (property == connector->content_protection_property) {
		*val = state->content_protection;
	} else if (property == config->writeback_fb_id_property) {
		/* Writeback framebuffer is one-shot, write and forget */
		*val = 0;
	} else if (property == config->writeback_out_fence_ptr_property) {
		*val = 0;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

int drm_atomic_get_property(struct drm_mode_object *obj,
			    struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	/* Nothing to do for same crtc */
	if (plane_state->crtc == crtc)
		return 0;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~drm_plane_mask(plane);
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= drm_plane_mask(plane);
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n",
				 plane->base.id, plane->name, plane_state,
				 crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link [PLANE:%d:%s] state %p to [NOCRTC]\n",
				 plane->base.id, plane->name, plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	struct drm_plane *plane = plane_state->plane;

	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for [PLANE:%d:%s] state %p\n",
				 fb->base.id, plane->base.id, plane->name,
				 plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for [PLANE:%d:%s] state %p\n",
				 plane->base.id, plane->name, plane_state);

	drm_framebuffer_assign(&plane_state->fb, fb);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);

/**
 * drm_atomic_set_fence_for_plane - set fence for plane
 * @plane_state: atomic state object for the plane
 * @fence: dma_fence to use for the plane
 *
 * Helper to set up the plane_state fence in case it is not set yet.
 * By using this, drivers don't need to worry about whether the user chose
 * implicit or explicit fencing.
 *
 * This function will not set the fence to the state if it was set
 * via explicit fencing interfaces on the atomic ioctl. In that case it will
 * drop the reference to the fence as we are not storing it anywhere.
 * Otherwise, if &drm_plane_state.fence is not set, this function will set it
 * to the received implicit fence. In both cases this function consumes a
 * reference for @fence.
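 *
 * A minimal usage sketch, assuming a driver's &drm_plane_helper_funcs.prepare_fb
 * hook that has already looked up the implicit fence of the backing buffer (how
 * that implicit_fence is obtained is driver specific and not shown here):
 *
 *	drm_atomic_set_fence_for_plane(plane_state, implicit_fence);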
 *
 * This way explicit fencing can be used to overrule implicit fencing, which is
 * important to make explicit fencing use-cases work: One example is using one
 * buffer for 2 screens with different refresh rates. Implicit fencing will
 * clamp rendering to the refresh rate of the slower screen, whereas an explicit
 * fence allows 2 independent render and display loops on a single buffer. If a
 * driver obeys both implicit and explicit fences for plane updates, then it
 * will break all the benefits of explicit fencing.
 */
void
drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
			       struct dma_fence *fence)
{
	if (plane_state->fence) {
		dma_fence_put(fence);
		return;
	}

	plane_state->fence = fence;
}
EXPORT_SYMBOL(drm_atomic_set_fence_for_plane);

/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_connector *connector = conn_state->connector;
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(conn_state->state,
							   conn_state->crtc);

		crtc_state->connector_mask &=
			~drm_connector_mask(conn_state->connector);

		drm_connector_put(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			drm_connector_mask(conn_state->connector);

		drm_connector_get(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n",
				 connector->base.id, connector->name,
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n",
				 connector->base.id, connector->name,
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);

/*
 * drm_atomic_get_writeback_job - return or allocate a writeback job
 * @conn_state: Connector state to get the job for
 *
 * Writeback jobs have a different lifetime from the atomic state they are
 * associated with. This convenience function takes care of allocating a job
 * if there isn't yet one associated with the connector state, otherwise
 * it just returns the existing job.
 *
 * Returns: The writeback job for the given connector state
 */
static struct drm_writeback_job *
drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
{
	WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);

	if (!conn_state->writeback_job)
		conn_state->writeback_job =
			kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);

	return conn_state->writeback_job;
}

/**
 * drm_atomic_set_writeback_fb_for_connector - set writeback framebuffer
 * @conn_state: atomic state object for the connector
 * @fb: fb to use for the connector
 *
 * This is used to set the framebuffer for a writeback connector, which outputs
 * to a buffer instead of an actual physical connector.
 * Changing the assigned framebuffer requires us to grab a reference to the new
 * fb and drop the reference to the old fb, if there is one. This function
 * takes care of all these details besides updating the pointer in the
 * state object itself.
 *
 * Note: The only way conn_state can already have an fb set is if the commit
 * sets the property more than once.
 *
 * See also: drm_writeback_connector_init()
 *
 * Returns: 0 on success
 */
int drm_atomic_set_writeback_fb_for_connector(
		struct drm_connector_state *conn_state,
		struct drm_framebuffer *fb)
{
	struct drm_writeback_job *job =
		drm_atomic_get_writeback_job(conn_state);

	if (!job)
		return -ENOMEM;

	drm_framebuffer_assign(&job->fb, fb);

	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
				 fb->base.id, conn_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for connector state %p\n",
				 conn_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_writeback_fb_for_connector);

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
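 *
 * For example, check code usually only pays this cost when a full modeset is
 * really required, along these lines (a minimal sketch of the usual pattern,
 * not tied to any particular driver):
 *
 *	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
 *		ret = drm_atomic_add_affected_connectors(state, crtc);
 *		if (ret)
 *			return ret;
 *
 *		ret = drm_atomic_add_affected_planes(state, crtc);
 *		if (ret)
 *			return ret;
 *	}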
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any), adding all the plane states
 * for a CRTC will not reduce the parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
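 *
 * This runs as part of drm_atomic_commit() and of the TEST_ONLY path of the
 * atomic ioctl. The per-driver &drm_mode_config_funcs.atomic_check hook it
 * invokes is usually just wired up to the atomic helpers, for example (a
 * sketch; foo_fb_create stands in for whatever framebuffer creation hook a
 * driver actually uses):
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		.fb_create = foo_fb_create,
 *		.atomic_check = drm_atomic_helper_check,
 *		.atomic_commit = drm_atomic_helper_commit,
 *	};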
1918 */ 1919 int drm_atomic_check_only(struct drm_atomic_state *state) 1920 { 1921 struct drm_device *dev = state->dev; 1922 struct drm_mode_config *config = &dev->mode_config; 1923 struct drm_plane *plane; 1924 struct drm_plane_state *plane_state; 1925 struct drm_crtc *crtc; 1926 struct drm_crtc_state *crtc_state; 1927 struct drm_connector *conn; 1928 struct drm_connector_state *conn_state; 1929 int i, ret = 0; 1930 1931 DRM_DEBUG_ATOMIC("checking %p\n", state); 1932 1933 for_each_new_plane_in_state(state, plane, plane_state, i) { 1934 ret = drm_atomic_plane_check(plane, plane_state); 1935 if (ret) { 1936 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", 1937 plane->base.id, plane->name); 1938 return ret; 1939 } 1940 } 1941 1942 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1943 ret = drm_atomic_crtc_check(crtc, crtc_state); 1944 if (ret) { 1945 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", 1946 crtc->base.id, crtc->name); 1947 return ret; 1948 } 1949 } 1950 1951 for_each_new_connector_in_state(state, conn, conn_state, i) { 1952 ret = drm_atomic_connector_check(conn, conn_state); 1953 if (ret) { 1954 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n", 1955 conn->base.id, conn->name); 1956 return ret; 1957 } 1958 } 1959 1960 if (config->funcs->atomic_check) { 1961 ret = config->funcs->atomic_check(state->dev, state); 1962 1963 if (ret) { 1964 DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", 1965 state, ret); 1966 return ret; 1967 } 1968 } 1969 1970 if (!state->allow_modeset) { 1971 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1972 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1973 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", 1974 crtc->base.id, crtc->name); 1975 return -EINVAL; 1976 } 1977 } 1978 } 1979 1980 return 0; 1981 } 1982 EXPORT_SYMBOL(drm_atomic_check_only); 1983 1984 /** 1985 * drm_atomic_commit - commit configuration atomically 1986 * @state: atomic configuration to check 1987 * 1988 * Note that this function can return -EDEADLK if the driver needed to acquire 1989 * more locks but encountered a deadlock. The caller must then do the usual w/w 1990 * backoff dance and restart. All other errors are fatal. 1991 * 1992 * This function will take its own reference on @state. 1993 * Callers should always release their reference with drm_atomic_state_put(). 1994 * 1995 * Returns: 1996 * 0 on success, negative error code on failure. 1997 */ 1998 int drm_atomic_commit(struct drm_atomic_state *state) 1999 { 2000 struct drm_mode_config *config = &state->dev->mode_config; 2001 int ret; 2002 2003 ret = drm_atomic_check_only(state); 2004 if (ret) 2005 return ret; 2006 2007 DRM_DEBUG_ATOMIC("committing %p\n", state); 2008 2009 return config->funcs->atomic_commit(state->dev, state, false); 2010 } 2011 EXPORT_SYMBOL(drm_atomic_commit); 2012 2013 /** 2014 * drm_atomic_nonblocking_commit - atomic nonblocking commit 2015 * @state: atomic configuration to check 2016 * 2017 * Note that this function can return -EDEADLK if the driver needed to acquire 2018 * more locks but encountered a deadlock. The caller must then do the usual w/w 2019 * backoff dance and restart. All other errors are fatal. 2020 * 2021 * This function will take its own reference on @state. 2022 * Callers should always release their reference with drm_atomic_state_put(). 2023 * 2024 * Returns: 2025 * 0 on success, negative error code on failure. 
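 *
 * The "w/w backoff dance" mentioned above looks roughly like this for an
 * in-kernel caller of either commit variant (a minimal sketch: allocation
 * failure handling is omitted and foo_build_state() stands in for whatever
 * driver code fills @state with the new configuration):
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	state->acquire_ctx = &ctx;
 *
 * retry:
 *	ret = foo_build_state(state);
 *	if (!ret)
 *		ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *
 *	drm_atomic_state_put(state);
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 *
 * drm_mode_atomic_ioctl() at the end of this file follows the same pattern.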
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

static void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must hold all modeset locks (drm_modeset_lock_all()), or, if this
 * is called from an error IRQ handler, it should not be enabled by default.
 * (I.e. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is
 * not inherently safe.)
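 *
 * A typical driver-side debug use is something like the following (a minimal
 * sketch; the surrounding error-reporting plumbing and any ratelimiting are
 * left to the driver, and the locks must be droppable in that context):
 *
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	drm_modeset_lock_all(dev);
 *	drm_state_dump(dev, &p);
 *	drm_modeset_unlock_all(dev);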
2117 */ 2118 void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 2119 { 2120 __drm_state_dump(dev, p, false); 2121 } 2122 EXPORT_SYMBOL(drm_state_dump); 2123 2124 #ifdef CONFIG_DEBUG_FS 2125 static int drm_state_info(struct seq_file *m, void *data) 2126 { 2127 struct drm_info_node *node = (struct drm_info_node *) m->private; 2128 struct drm_device *dev = node->minor->dev; 2129 struct drm_printer p = drm_seq_file_printer(m); 2130 2131 __drm_state_dump(dev, &p, true); 2132 2133 return 0; 2134 } 2135 2136 /* any use in debugfs files to dump individual planes/crtc/etc? */ 2137 static const struct drm_info_list drm_atomic_debugfs_list[] = { 2138 {"state", drm_state_info, 0}, 2139 }; 2140 2141 int drm_atomic_debugfs_init(struct drm_minor *minor) 2142 { 2143 return drm_debugfs_create_files(drm_atomic_debugfs_list, 2144 ARRAY_SIZE(drm_atomic_debugfs_list), 2145 minor->debugfs_root, minor); 2146 } 2147 #endif 2148 2149 /* 2150 * The big monster ioctl 2151 */ 2152 2153 static struct drm_pending_vblank_event *create_vblank_event( 2154 struct drm_crtc *crtc, uint64_t user_data) 2155 { 2156 struct drm_pending_vblank_event *e = NULL; 2157 2158 e = kzalloc(sizeof *e, GFP_KERNEL); 2159 if (!e) 2160 return NULL; 2161 2162 e->event.base.type = DRM_EVENT_FLIP_COMPLETE; 2163 e->event.base.length = sizeof(e->event); 2164 e->event.vbl.crtc_id = crtc->base.id; 2165 e->event.vbl.user_data = user_data; 2166 2167 return e; 2168 } 2169 2170 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, 2171 struct drm_connector *connector, 2172 int mode) 2173 { 2174 struct drm_connector *tmp_connector; 2175 struct drm_connector_state *new_conn_state; 2176 struct drm_crtc *crtc; 2177 struct drm_crtc_state *crtc_state; 2178 int i, ret, old_mode = connector->dpms; 2179 bool active = false; 2180 2181 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, 2182 state->acquire_ctx); 2183 if (ret) 2184 return ret; 2185 2186 if (mode != DRM_MODE_DPMS_ON) 2187 mode = DRM_MODE_DPMS_OFF; 2188 connector->dpms = mode; 2189 2190 crtc = connector->state->crtc; 2191 if (!crtc) 2192 goto out; 2193 ret = drm_atomic_add_affected_connectors(state, crtc); 2194 if (ret) 2195 goto out; 2196 2197 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2198 if (IS_ERR(crtc_state)) { 2199 ret = PTR_ERR(crtc_state); 2200 goto out; 2201 } 2202 2203 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { 2204 if (new_conn_state->crtc != crtc) 2205 continue; 2206 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { 2207 active = true; 2208 break; 2209 } 2210 } 2211 2212 crtc_state->active = active; 2213 ret = drm_atomic_commit(state); 2214 out: 2215 if (ret != 0) 2216 connector->dpms = old_mode; 2217 return ret; 2218 } 2219 2220 int drm_atomic_set_property(struct drm_atomic_state *state, 2221 struct drm_mode_object *obj, 2222 struct drm_property *prop, 2223 uint64_t prop_value) 2224 { 2225 struct drm_mode_object *ref; 2226 int ret; 2227 2228 if (!drm_property_change_valid_get(prop, prop_value, &ref)) 2229 return -EINVAL; 2230 2231 switch (obj->type) { 2232 case DRM_MODE_OBJECT_CONNECTOR: { 2233 struct drm_connector *connector = obj_to_connector(obj); 2234 struct drm_connector_state *connector_state; 2235 2236 connector_state = drm_atomic_get_connector_state(state, connector); 2237 if (IS_ERR(connector_state)) { 2238 ret = PTR_ERR(connector_state); 2239 break; 2240 } 2241 2242 ret = drm_atomic_connector_set_property(connector, 2243 connector_state, prop, prop_value); 2244 break; 2245 } 2246 case 
DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	drm_property_change_valid_put(prop, ref);
	return ret;
}

/**
 * DOC: explicit fencing properties
 *
 * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A fence or a group of fences is transferred to/from
 * userspace using Sync File fds, and there are two DRM properties for that:
 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
 *
 * By contrast, with implicit fencing the kernel keeps track of any
 * ongoing rendering, and automatically ensures that the atomic update waits
 * for any pending rendering to complete. For shared buffers represented with
 * a &struct dma_buf this is tracked in &struct reservation_object.
 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
 * whereas explicit fencing is what Android wants.
 *
 * "IN_FENCE_FD":
 *	Use this property to pass a fence that DRM should wait on before
 *	proceeding with the Atomic Commit request and show the framebuffer for
 *	the plane on the screen. The fence can be either a normal fence or a
 *	merged one, the sync_file framework will handle both cases and use a
 *	fence_array if a merged fence is received. Passing -1 here means there
 *	are no fences to wait on.
 *
 *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag,
 *	it will only check whether the Sync File is a valid one.
 *
 *	On the driver side the fence is stored on the @fence parameter of
 *	&struct drm_plane_state. Drivers which also support implicit fencing
 *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
 *	to make sure there's consistent behaviour between drivers in precedence
 *	of implicit vs. explicit fencing.
 *
 * "OUT_FENCE_PTR":
 *	Use this property to pass a file descriptor pointer to DRM. Once the
 *	Atomic Commit request call returns, OUT_FENCE_PTR will be filled with
 *	the file descriptor number of a Sync File. This Sync File contains the
 *	CRTC fence that will be signaled when all framebuffers present on the
 *	Atomic Commit request for that given CRTC are scanned out on the
 *	screen.
 *
 *	The Atomic Commit request fails if an invalid pointer is passed. If the
 *	Atomic Commit request fails for any other reason, the out fence fd
 *	returned will be -1. On an Atomic Commit with the
 *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
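 *
 * From userspace the two properties are used together roughly as follows,
 * sketched here with libdrm (a minimal, hypothetical example: plane_id,
 * crtc_id, render_fence_fd and the two property IDs are assumed to have been
 * obtained already, e.g. via drmModeObjectGetProperties()):
 *
 *	int out_fence_fd = -1;
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop,
 *				 render_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence_fd);
 *
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 *
 * On success, out_fence_fd holds a Sync File fd that signals once the new
 * framebuffers for that CRTC have been scanned out, while render_fence_fd is
 * the fence the kernel waits on before using the plane's new framebuffer.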
2328 * 2329 * Note that out-fences don't have a special interface to drivers and are 2330 * internally represented by a &struct drm_pending_vblank_event in struct 2331 * &drm_crtc_state, which is also used by the nonblocking atomic commit 2332 * helpers and for the DRM event handling for existing userspace. 2333 */ 2334 2335 struct drm_out_fence_state { 2336 s32 __user *out_fence_ptr; 2337 struct sync_file *sync_file; 2338 int fd; 2339 }; 2340 2341 static int setup_out_fence(struct drm_out_fence_state *fence_state, 2342 struct dma_fence *fence) 2343 { 2344 fence_state->fd = get_unused_fd_flags(O_CLOEXEC); 2345 if (fence_state->fd < 0) 2346 return fence_state->fd; 2347 2348 if (put_user(fence_state->fd, fence_state->out_fence_ptr)) 2349 return -EFAULT; 2350 2351 fence_state->sync_file = sync_file_create(fence); 2352 if (!fence_state->sync_file) 2353 return -ENOMEM; 2354 2355 return 0; 2356 } 2357 2358 static int prepare_signaling(struct drm_device *dev, 2359 struct drm_atomic_state *state, 2360 struct drm_mode_atomic *arg, 2361 struct drm_file *file_priv, 2362 struct drm_out_fence_state **fence_state, 2363 unsigned int *num_fences) 2364 { 2365 struct drm_crtc *crtc; 2366 struct drm_crtc_state *crtc_state; 2367 struct drm_connector *conn; 2368 struct drm_connector_state *conn_state; 2369 int i, c = 0, ret; 2370 2371 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) 2372 return 0; 2373 2374 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2375 s32 __user *fence_ptr; 2376 2377 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 2378 2379 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { 2380 struct drm_pending_vblank_event *e; 2381 2382 e = create_vblank_event(crtc, arg->user_data); 2383 if (!e) 2384 return -ENOMEM; 2385 2386 crtc_state->event = e; 2387 } 2388 2389 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 2390 struct drm_pending_vblank_event *e = crtc_state->event; 2391 2392 if (!file_priv) 2393 continue; 2394 2395 ret = drm_event_reserve_init(dev, file_priv, &e->base, 2396 &e->event.base); 2397 if (ret) { 2398 kfree(e); 2399 crtc_state->event = NULL; 2400 return ret; 2401 } 2402 } 2403 2404 if (fence_ptr) { 2405 struct dma_fence *fence; 2406 struct drm_out_fence_state *f; 2407 2408 f = krealloc(*fence_state, sizeof(**fence_state) * 2409 (*num_fences + 1), GFP_KERNEL); 2410 if (!f) 2411 return -ENOMEM; 2412 2413 memset(&f[*num_fences], 0, sizeof(*f)); 2414 2415 f[*num_fences].out_fence_ptr = fence_ptr; 2416 *fence_state = f; 2417 2418 fence = drm_crtc_create_fence(crtc); 2419 if (!fence) 2420 return -ENOMEM; 2421 2422 ret = setup_out_fence(&f[(*num_fences)++], fence); 2423 if (ret) { 2424 dma_fence_put(fence); 2425 return ret; 2426 } 2427 2428 crtc_state->event->base.fence = fence; 2429 } 2430 2431 c++; 2432 } 2433 2434 for_each_new_connector_in_state(state, conn, conn_state, i) { 2435 struct drm_writeback_connector *wb_conn; 2436 struct drm_writeback_job *job; 2437 struct drm_out_fence_state *f; 2438 struct dma_fence *fence; 2439 s32 __user *fence_ptr; 2440 2441 fence_ptr = get_out_fence_for_connector(state, conn); 2442 if (!fence_ptr) 2443 continue; 2444 2445 job = drm_atomic_get_writeback_job(conn_state); 2446 if (!job) 2447 return -ENOMEM; 2448 2449 f = krealloc(*fence_state, sizeof(**fence_state) * 2450 (*num_fences + 1), GFP_KERNEL); 2451 if (!f) 2452 return -ENOMEM; 2453 2454 memset(&f[*num_fences], 0, sizeof(*f)); 2455 2456 f[*num_fences].out_fence_ptr = fence_ptr; 2457 *fence_state = f; 2458 2459 wb_conn = drm_connector_to_writeback(conn); 2460 fence = 
drm_writeback_get_out_fence(wb_conn); 2461 if (!fence) 2462 return -ENOMEM; 2463 2464 ret = setup_out_fence(&f[(*num_fences)++], fence); 2465 if (ret) { 2466 dma_fence_put(fence); 2467 return ret; 2468 } 2469 2470 job->out_fence = fence; 2471 } 2472 2473 /* 2474 * Having this flag means user mode pends on event which will never 2475 * reach due to lack of at least one CRTC for signaling 2476 */ 2477 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2478 return -EINVAL; 2479 2480 return 0; 2481 } 2482 2483 static void complete_signaling(struct drm_device *dev, 2484 struct drm_atomic_state *state, 2485 struct drm_out_fence_state *fence_state, 2486 unsigned int num_fences, 2487 bool install_fds) 2488 { 2489 struct drm_crtc *crtc; 2490 struct drm_crtc_state *crtc_state; 2491 int i; 2492 2493 if (install_fds) { 2494 for (i = 0; i < num_fences; i++) 2495 fd_install(fence_state[i].fd, 2496 fence_state[i].sync_file->file); 2497 2498 kfree(fence_state); 2499 return; 2500 } 2501 2502 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2503 struct drm_pending_vblank_event *event = crtc_state->event; 2504 /* 2505 * Free the allocated event. drm_atomic_helper_setup_commit 2506 * can allocate an event too, so only free it if it's ours 2507 * to prevent a double free in drm_atomic_state_clear. 2508 */ 2509 if (event && (event->base.fence || event->base.file_priv)) { 2510 drm_event_cancel_free(dev, &event->base); 2511 crtc_state->event = NULL; 2512 } 2513 } 2514 2515 if (!fence_state) 2516 return; 2517 2518 for (i = 0; i < num_fences; i++) { 2519 if (fence_state[i].sync_file) 2520 fput(fence_state[i].sync_file->file); 2521 if (fence_state[i].fd >= 0) 2522 put_unused_fd(fence_state[i].fd); 2523 2524 /* If this fails log error to the user */ 2525 if (fence_state[i].out_fence_ptr && 2526 put_user(-1, fence_state[i].out_fence_ptr)) 2527 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); 2528 } 2529 2530 kfree(fence_state); 2531 } 2532 2533 int drm_mode_atomic_ioctl(struct drm_device *dev, 2534 void *data, struct drm_file *file_priv) 2535 { 2536 struct drm_mode_atomic *arg = data; 2537 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); 2538 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); 2539 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); 2540 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); 2541 unsigned int copied_objs, copied_props; 2542 struct drm_atomic_state *state; 2543 struct drm_modeset_acquire_ctx ctx; 2544 struct drm_out_fence_state *fence_state; 2545 int ret = 0; 2546 unsigned int i, j, num_fences; 2547 2548 /* disallow for drivers not supporting atomic: */ 2549 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2550 return -EINVAL; 2551 2552 /* disallow for userspace that has not enabled atomic cap (even 2553 * though this may be a bit overkill, since legacy userspace 2554 * wouldn't know how to call this ioctl) 2555 */ 2556 if (!file_priv->atomic) 2557 return -EINVAL; 2558 2559 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) 2560 return -EINVAL; 2561 2562 if (arg->reserved) 2563 return -EINVAL; 2564 2565 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && 2566 !dev->mode_config.async_page_flip) 2567 return -EINVAL; 2568 2569 /* can't test and expect an event at the same time. 
*/ 2570 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && 2571 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2572 return -EINVAL; 2573 2574 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2575 2576 state = drm_atomic_state_alloc(dev); 2577 if (!state) 2578 return -ENOMEM; 2579 2580 state->acquire_ctx = &ctx; 2581 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 2582 2583 retry: 2584 copied_objs = 0; 2585 copied_props = 0; 2586 fence_state = NULL; 2587 num_fences = 0; 2588 2589 for (i = 0; i < arg->count_objs; i++) { 2590 uint32_t obj_id, count_props; 2591 struct drm_mode_object *obj; 2592 2593 if (get_user(obj_id, objs_ptr + copied_objs)) { 2594 ret = -EFAULT; 2595 goto out; 2596 } 2597 2598 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); 2599 if (!obj) { 2600 ret = -ENOENT; 2601 goto out; 2602 } 2603 2604 if (!obj->properties) { 2605 drm_mode_object_put(obj); 2606 ret = -ENOENT; 2607 goto out; 2608 } 2609 2610 if (get_user(count_props, count_props_ptr + copied_objs)) { 2611 drm_mode_object_put(obj); 2612 ret = -EFAULT; 2613 goto out; 2614 } 2615 2616 copied_objs++; 2617 2618 for (j = 0; j < count_props; j++) { 2619 uint32_t prop_id; 2620 uint64_t prop_value; 2621 struct drm_property *prop; 2622 2623 if (get_user(prop_id, props_ptr + copied_props)) { 2624 drm_mode_object_put(obj); 2625 ret = -EFAULT; 2626 goto out; 2627 } 2628 2629 prop = drm_mode_obj_find_prop_id(obj, prop_id); 2630 if (!prop) { 2631 drm_mode_object_put(obj); 2632 ret = -ENOENT; 2633 goto out; 2634 } 2635 2636 if (copy_from_user(&prop_value, 2637 prop_values_ptr + copied_props, 2638 sizeof(prop_value))) { 2639 drm_mode_object_put(obj); 2640 ret = -EFAULT; 2641 goto out; 2642 } 2643 2644 ret = drm_atomic_set_property(state, obj, prop, 2645 prop_value); 2646 if (ret) { 2647 drm_mode_object_put(obj); 2648 goto out; 2649 } 2650 2651 copied_props++; 2652 } 2653 2654 drm_mode_object_put(obj); 2655 } 2656 2657 ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, 2658 &num_fences); 2659 if (ret) 2660 goto out; 2661 2662 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { 2663 ret = drm_atomic_check_only(state); 2664 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 2665 ret = drm_atomic_nonblocking_commit(state); 2666 } else { 2667 if (unlikely(drm_debug & DRM_UT_STATE)) 2668 drm_atomic_print_state(state); 2669 2670 ret = drm_atomic_commit(state); 2671 } 2672 2673 out: 2674 complete_signaling(dev, state, fence_state, num_fences, !ret); 2675 2676 if (ret == -EDEADLK) { 2677 drm_atomic_state_clear(state); 2678 ret = drm_modeset_backoff(&ctx); 2679 if (!ret) 2680 goto retry; 2681 } 2682 2683 drm_atomic_state_put(state); 2684 2685 drm_modeset_drop_locks(&ctx); 2686 drm_modeset_acquire_fini(&ctx); 2687 2688 return ret; 2689 } 2690