1 /* 2 * Copyright (C) 2014 Red Hat 3 * Copyright (C) 2014 Intel Corp. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Rob Clark <robdclark@gmail.com> 25 * Daniel Vetter <daniel.vetter@ffwll.ch> 26 */ 27 28 29 #include <drm/drmP.h> 30 #include <drm/drm_atomic.h> 31 #include <drm/drm_mode.h> 32 #include <drm/drm_print.h> 33 #include <linux/sync_file.h> 34 35 #include "drm_crtc_internal.h" 36 #include "drm_internal.h" 37 38 void __drm_crtc_commit_free(struct kref *kref) 39 { 40 struct drm_crtc_commit *commit = 41 container_of(kref, struct drm_crtc_commit, ref); 42 43 kfree(commit); 44 } 45 EXPORT_SYMBOL(__drm_crtc_commit_free); 46 47 /** 48 * drm_atomic_state_default_release - 49 * release memory initialized by drm_atomic_state_init 50 * @state: atomic state 51 * 52 * Free all the memory allocated by drm_atomic_state_init. 53 * This should only be used by drivers which are still subclassing 54 * &drm_atomic_state and haven't switched to &drm_private_state yet. 55 */ 56 void drm_atomic_state_default_release(struct drm_atomic_state *state) 57 { 58 kfree(state->connectors); 59 kfree(state->crtcs); 60 kfree(state->planes); 61 kfree(state->private_objs); 62 } 63 EXPORT_SYMBOL(drm_atomic_state_default_release); 64 65 /** 66 * drm_atomic_state_init - init new atomic state 67 * @dev: DRM device 68 * @state: atomic state 69 * 70 * Default implementation for filling in a new atomic state. 71 * This should only be used by drivers which are still subclassing 72 * &drm_atomic_state and haven't switched to &drm_private_state yet. 73 */ 74 int 75 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) 76 { 77 kref_init(&state->ref); 78 79 /* TODO legacy paths should maybe do a better job about 80 * setting this appropriately? 81 */ 82 state->allow_modeset = true; 83 84 state->crtcs = kcalloc(dev->mode_config.num_crtc, 85 sizeof(*state->crtcs), GFP_KERNEL); 86 if (!state->crtcs) 87 goto fail; 88 state->planes = kcalloc(dev->mode_config.num_total_plane, 89 sizeof(*state->planes), GFP_KERNEL); 90 if (!state->planes) 91 goto fail; 92 93 state->dev = dev; 94 95 DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); 96 97 return 0; 98 fail: 99 drm_atomic_state_default_release(state); 100 return -ENOMEM; 101 } 102 EXPORT_SYMBOL(drm_atomic_state_init); 103 104 /** 105 * drm_atomic_state_alloc - allocate atomic state 106 * @dev: DRM device 107 * 108 * This allocates an empty atomic state to track updates. 
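 *
 * As a rough illustration of the intended lifecycle (a sketch only, assuming
 * the caller has already set up a &drm_modeset_acquire_ctx named ctx):
 *
 *     struct drm_atomic_state *state;
 *
 *     state = drm_atomic_state_alloc(dev);
 *     if (!state)
 *             return -ENOMEM;
 *
 *     state->acquire_ctx = ctx;
 *     ... add objects, check and commit the update ...
 *     drm_atomic_state_put(state);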
109 */ 110 struct drm_atomic_state * 111 drm_atomic_state_alloc(struct drm_device *dev) 112 { 113 struct drm_mode_config *config = &dev->mode_config; 114 115 if (!config->funcs->atomic_state_alloc) { 116 struct drm_atomic_state *state; 117 118 state = kzalloc(sizeof(*state), GFP_KERNEL); 119 if (!state) 120 return NULL; 121 if (drm_atomic_state_init(dev, state) < 0) { 122 kfree(state); 123 return NULL; 124 } 125 return state; 126 } 127 128 return config->funcs->atomic_state_alloc(dev); 129 } 130 EXPORT_SYMBOL(drm_atomic_state_alloc); 131 132 /** 133 * drm_atomic_state_default_clear - clear base atomic state 134 * @state: atomic state 135 * 136 * Default implementation for clearing atomic state. 137 * This should only be used by drivers which are still subclassing 138 * &drm_atomic_state and haven't switched to &drm_private_state yet. 139 */ 140 void drm_atomic_state_default_clear(struct drm_atomic_state *state) 141 { 142 struct drm_device *dev = state->dev; 143 struct drm_mode_config *config = &dev->mode_config; 144 int i; 145 146 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); 147 148 for (i = 0; i < state->num_connector; i++) { 149 struct drm_connector *connector = state->connectors[i].ptr; 150 151 if (!connector) 152 continue; 153 154 connector->funcs->atomic_destroy_state(connector, 155 state->connectors[i].state); 156 state->connectors[i].ptr = NULL; 157 state->connectors[i].state = NULL; 158 state->connectors[i].old_state = NULL; 159 state->connectors[i].new_state = NULL; 160 drm_connector_put(connector); 161 } 162 163 for (i = 0; i < config->num_crtc; i++) { 164 struct drm_crtc *crtc = state->crtcs[i].ptr; 165 166 if (!crtc) 167 continue; 168 169 crtc->funcs->atomic_destroy_state(crtc, 170 state->crtcs[i].state); 171 172 state->crtcs[i].ptr = NULL; 173 state->crtcs[i].state = NULL; 174 state->crtcs[i].old_state = NULL; 175 state->crtcs[i].new_state = NULL; 176 } 177 178 for (i = 0; i < config->num_total_plane; i++) { 179 struct drm_plane *plane = state->planes[i].ptr; 180 181 if (!plane) 182 continue; 183 184 plane->funcs->atomic_destroy_state(plane, 185 state->planes[i].state); 186 state->planes[i].ptr = NULL; 187 state->planes[i].state = NULL; 188 state->planes[i].old_state = NULL; 189 state->planes[i].new_state = NULL; 190 } 191 192 for (i = 0; i < state->num_private_objs; i++) { 193 struct drm_private_obj *obj = state->private_objs[i].ptr; 194 195 obj->funcs->atomic_destroy_state(obj, 196 state->private_objs[i].state); 197 state->private_objs[i].ptr = NULL; 198 state->private_objs[i].state = NULL; 199 state->private_objs[i].old_state = NULL; 200 state->private_objs[i].new_state = NULL; 201 } 202 state->num_private_objs = 0; 203 204 if (state->fake_commit) { 205 drm_crtc_commit_put(state->fake_commit); 206 state->fake_commit = NULL; 207 } 208 } 209 EXPORT_SYMBOL(drm_atomic_state_default_clear); 210 211 /** 212 * drm_atomic_state_clear - clear state object 213 * @state: atomic state 214 * 215 * When the w/w mutex algorithm detects a deadlock we need to back off and drop 216 * all locks. So someone else could sneak in and change the current modeset 217 * configuration. Which means that all the state assembled in @state is no 218 * longer an atomic update to the current state, but to some arbitrary earlier 219 * state. Which could break assumptions the driver's 220 * &drm_mode_config_funcs.atomic_check likely relies on. 221 * 222 * Hence we must clear all cached state and completely start over, using this 223 * function. 
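 *
 * A condensed sketch of that backoff dance, with every error path other than
 * -EDEADLK omitted (ctx is the &drm_modeset_acquire_ctx used by @state):
 *
 *     retry:
 *     ret = drm_atomic_commit(state);
 *     if (ret == -EDEADLK) {
 *             drm_atomic_state_clear(state);
 *             drm_modeset_backoff(ctx);
 *             goto retry;
 *     }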
224 */ 225 void drm_atomic_state_clear(struct drm_atomic_state *state) 226 { 227 struct drm_device *dev = state->dev; 228 struct drm_mode_config *config = &dev->mode_config; 229 230 if (config->funcs->atomic_state_clear) 231 config->funcs->atomic_state_clear(state); 232 else 233 drm_atomic_state_default_clear(state); 234 } 235 EXPORT_SYMBOL(drm_atomic_state_clear); 236 237 /** 238 * __drm_atomic_state_free - free all memory for an atomic state 239 * @ref: This atomic state to deallocate 240 * 241 * This frees all memory associated with an atomic state, including all the 242 * per-object state for planes, crtcs and connectors. 243 */ 244 void __drm_atomic_state_free(struct kref *ref) 245 { 246 struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); 247 struct drm_mode_config *config = &state->dev->mode_config; 248 249 drm_atomic_state_clear(state); 250 251 DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); 252 253 if (config->funcs->atomic_state_free) { 254 config->funcs->atomic_state_free(state); 255 } else { 256 drm_atomic_state_default_release(state); 257 kfree(state); 258 } 259 } 260 EXPORT_SYMBOL(__drm_atomic_state_free); 261 262 /** 263 * drm_atomic_get_crtc_state - get crtc state 264 * @state: global atomic state object 265 * @crtc: crtc to get state object for 266 * 267 * This function returns the crtc state for the given crtc, allocating it if 268 * needed. It will also grab the relevant crtc lock to make sure that the state 269 * is consistent. 270 * 271 * Returns: 272 * 273 * Either the allocated state or the error code encoded into the pointer. When 274 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 275 * entire atomic sequence must be restarted. All other errors are fatal. 276 */ 277 struct drm_crtc_state * 278 drm_atomic_get_crtc_state(struct drm_atomic_state *state, 279 struct drm_crtc *crtc) 280 { 281 int ret, index = drm_crtc_index(crtc); 282 struct drm_crtc_state *crtc_state; 283 284 WARN_ON(!state->acquire_ctx); 285 286 crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); 287 if (crtc_state) 288 return crtc_state; 289 290 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); 291 if (ret) 292 return ERR_PTR(ret); 293 294 crtc_state = crtc->funcs->atomic_duplicate_state(crtc); 295 if (!crtc_state) 296 return ERR_PTR(-ENOMEM); 297 298 state->crtcs[index].state = crtc_state; 299 state->crtcs[index].old_state = crtc->state; 300 state->crtcs[index].new_state = crtc_state; 301 state->crtcs[index].ptr = crtc; 302 crtc_state->state = state; 303 304 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", 305 crtc->base.id, crtc->name, crtc_state, state); 306 307 return crtc_state; 308 } 309 EXPORT_SYMBOL(drm_atomic_get_crtc_state); 310 311 static void set_out_fence_for_crtc(struct drm_atomic_state *state, 312 struct drm_crtc *crtc, s32 __user *fence_ptr) 313 { 314 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; 315 } 316 317 static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, 318 struct drm_crtc *crtc) 319 { 320 s32 __user *fence_ptr; 321 322 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; 323 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; 324 325 return fence_ptr; 326 } 327 328 /** 329 * drm_atomic_set_mode_for_crtc - set mode for CRTC 330 * @state: the CRTC whose incoming state to update 331 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable 332 * 333 * Set a mode (originating from the kernel) on the desired CRTC state and update 334 * 
the enable property. 335 * 336 * RETURNS: 337 * Zero on success, error code on failure. Cannot return -EDEADLK. 338 */ 339 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, 340 const struct drm_display_mode *mode) 341 { 342 struct drm_mode_modeinfo umode; 343 344 /* Early return for no change. */ 345 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) 346 return 0; 347 348 drm_property_blob_put(state->mode_blob); 349 state->mode_blob = NULL; 350 351 if (mode) { 352 drm_mode_convert_to_umode(&umode, mode); 353 state->mode_blob = 354 drm_property_create_blob(state->crtc->dev, 355 sizeof(umode), 356 &umode); 357 if (IS_ERR(state->mode_blob)) 358 return PTR_ERR(state->mode_blob); 359 360 drm_mode_copy(&state->mode, mode); 361 state->enable = true; 362 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", 363 mode->name, state); 364 } else { 365 memset(&state->mode, 0, sizeof(state->mode)); 366 state->enable = false; 367 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", 368 state); 369 } 370 371 return 0; 372 } 373 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc); 374 375 /** 376 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC 377 * @state: the CRTC whose incoming state to update 378 * @blob: pointer to blob property to use for mode 379 * 380 * Set a mode (originating from a blob property) on the desired CRTC state. 381 * This function will take a reference on the blob property for the CRTC state, 382 * and release the reference held on the state's existing mode property, if any 383 * was set. 384 * 385 * RETURNS: 386 * Zero on success, error code on failure. Cannot return -EDEADLK. 387 */ 388 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, 389 struct drm_property_blob *blob) 390 { 391 if (blob == state->mode_blob) 392 return 0; 393 394 drm_property_blob_put(state->mode_blob); 395 state->mode_blob = NULL; 396 397 memset(&state->mode, 0, sizeof(state->mode)); 398 399 if (blob) { 400 if (blob->length != sizeof(struct drm_mode_modeinfo) || 401 drm_mode_convert_umode(state->crtc->dev, &state->mode, 402 blob->data)) 403 return -EINVAL; 404 405 state->mode_blob = drm_property_blob_get(blob); 406 state->enable = true; 407 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", 408 state->mode.name, state); 409 } else { 410 state->enable = false; 411 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", 412 state); 413 } 414 415 return 0; 416 } 417 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); 418 419 /** 420 * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it 421 * @dev: DRM device 422 * @blob: a pointer to the member blob to be replaced 423 * @blob_id: ID of the new blob 424 * @expected_size: total expected size of the blob data (in bytes) 425 * @expected_elem_size: expected element size of the blob data (in bytes) 426 * @replaced: did the blob get replaced? 427 * 428 * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero 429 * @blob becomes NULL. 430 * 431 * If @expected_size is positive the new blob length is expected to be equal 432 * to @expected_size bytes. If @expected_elem_size is positive the new blob 433 * length is expected to be a multiple of @expected_elem_size bytes. Otherwise 434 * an error is returned. 435 * 436 * @replaced will indicate to the caller whether the blob was replaced or not. 437 * If the old and new blobs were in fact the same blob @replaced will be false 438 * otherwise it will be true. 439 * 440 * RETURNS: 441 * Zero on success, error code on failure. 
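 *
 * For example, the CRTC property code below uses it roughly like this to swap
 * in a new gamma LUT blob, constraining only the element size:
 *
 *     ret = drm_atomic_replace_property_blob_from_id(dev, &state->gamma_lut,
 *                                                    val, -1,
 *                                                    sizeof(struct drm_color_lut),
 *                                                    &replaced);
 *     state->color_mgmt_changed |= replaced;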
442 */ 443 static int 444 drm_atomic_replace_property_blob_from_id(struct drm_device *dev, 445 struct drm_property_blob **blob, 446 uint64_t blob_id, 447 ssize_t expected_size, 448 ssize_t expected_elem_size, 449 bool *replaced) 450 { 451 struct drm_property_blob *new_blob = NULL; 452 453 if (blob_id != 0) { 454 new_blob = drm_property_lookup_blob(dev, blob_id); 455 if (new_blob == NULL) 456 return -EINVAL; 457 458 if (expected_size > 0 && 459 new_blob->length != expected_size) { 460 drm_property_blob_put(new_blob); 461 return -EINVAL; 462 } 463 if (expected_elem_size > 0 && 464 new_blob->length % expected_elem_size != 0) { 465 drm_property_blob_put(new_blob); 466 return -EINVAL; 467 } 468 } 469 470 *replaced |= drm_property_replace_blob(blob, new_blob); 471 drm_property_blob_put(new_blob); 472 473 return 0; 474 } 475 476 /** 477 * drm_atomic_crtc_set_property - set property on CRTC 478 * @crtc: the drm CRTC to set a property on 479 * @state: the state object to update with the new property value 480 * @property: the property to set 481 * @val: the new property value 482 * 483 * This function handles generic/core properties and calls out to driver's 484 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure 485 * consistent behavior you must call this function rather than the driver hook 486 * directly. 487 * 488 * RETURNS: 489 * Zero on success, error code on failure 490 */ 491 int drm_atomic_crtc_set_property(struct drm_crtc *crtc, 492 struct drm_crtc_state *state, struct drm_property *property, 493 uint64_t val) 494 { 495 struct drm_device *dev = crtc->dev; 496 struct drm_mode_config *config = &dev->mode_config; 497 bool replaced = false; 498 int ret; 499 500 if (property == config->prop_active) 501 state->active = val; 502 else if (property == config->prop_mode_id) { 503 struct drm_property_blob *mode = 504 drm_property_lookup_blob(dev, val); 505 ret = drm_atomic_set_mode_prop_for_crtc(state, mode); 506 drm_property_blob_put(mode); 507 return ret; 508 } else if (property == config->degamma_lut_property) { 509 ret = drm_atomic_replace_property_blob_from_id(dev, 510 &state->degamma_lut, 511 val, 512 -1, sizeof(struct drm_color_lut), 513 &replaced); 514 state->color_mgmt_changed |= replaced; 515 return ret; 516 } else if (property == config->ctm_property) { 517 ret = drm_atomic_replace_property_blob_from_id(dev, 518 &state->ctm, 519 val, 520 sizeof(struct drm_color_ctm), -1, 521 &replaced); 522 state->color_mgmt_changed |= replaced; 523 return ret; 524 } else if (property == config->gamma_lut_property) { 525 ret = drm_atomic_replace_property_blob_from_id(dev, 526 &state->gamma_lut, 527 val, 528 -1, sizeof(struct drm_color_lut), 529 &replaced); 530 state->color_mgmt_changed |= replaced; 531 return ret; 532 } else if (property == config->prop_out_fence_ptr) { 533 s32 __user *fence_ptr = u64_to_user_ptr(val); 534 535 if (!fence_ptr) 536 return 0; 537 538 if (put_user(-1, fence_ptr)) 539 return -EFAULT; 540 541 set_out_fence_for_crtc(state->state, crtc, fence_ptr); 542 } else if (crtc->funcs->atomic_set_property) 543 return crtc->funcs->atomic_set_property(crtc, state, property, val); 544 else 545 return -EINVAL; 546 547 return 0; 548 } 549 EXPORT_SYMBOL(drm_atomic_crtc_set_property); 550 551 /** 552 * drm_atomic_crtc_get_property - get property value from CRTC state 553 * @crtc: the drm CRTC to set a property on 554 * @state: the state object to get the property value from 555 * @property: the property to set 556 * @val: return location for the property value 557 * 558 * This 
function handles generic/core properties and calls out to driver's 559 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure 560 * consistent behavior you must call this function rather than the driver hook 561 * directly. 562 * 563 * RETURNS: 564 * Zero on success, error code on failure 565 */ 566 static int 567 drm_atomic_crtc_get_property(struct drm_crtc *crtc, 568 const struct drm_crtc_state *state, 569 struct drm_property *property, uint64_t *val) 570 { 571 struct drm_device *dev = crtc->dev; 572 struct drm_mode_config *config = &dev->mode_config; 573 574 if (property == config->prop_active) 575 *val = state->active; 576 else if (property == config->prop_mode_id) 577 *val = (state->mode_blob) ? state->mode_blob->base.id : 0; 578 else if (property == config->degamma_lut_property) 579 *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; 580 else if (property == config->ctm_property) 581 *val = (state->ctm) ? state->ctm->base.id : 0; 582 else if (property == config->gamma_lut_property) 583 *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; 584 else if (property == config->prop_out_fence_ptr) 585 *val = 0; 586 else if (crtc->funcs->atomic_get_property) 587 return crtc->funcs->atomic_get_property(crtc, state, property, val); 588 else 589 return -EINVAL; 590 591 return 0; 592 } 593 594 /** 595 * drm_atomic_crtc_check - check crtc state 596 * @crtc: crtc to check 597 * @state: crtc state to check 598 * 599 * Provides core sanity checks for crtc state. 600 * 601 * RETURNS: 602 * Zero on success, error code on failure 603 */ 604 static int drm_atomic_crtc_check(struct drm_crtc *crtc, 605 struct drm_crtc_state *state) 606 { 607 /* NOTE: we explicitly don't enforce constraints such as primary 608 * layer covering entire screen, since that is something we want 609 * to allow (on hw that supports it). For hw that does not, it 610 * should be checked in driver's crtc->atomic_check() vfunc. 611 * 612 * TODO: Add generic modeset state checks once we support those. 613 */ 614 615 if (state->active && !state->enable) { 616 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", 617 crtc->base.id, crtc->name); 618 return -EINVAL; 619 } 620 621 /* The state->enable vs. state->mode_blob checks can be WARN_ON, 622 * as this is a kernel-internal detail that userspace should never 623 * be able to trigger. */ 624 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 625 WARN_ON(state->enable && !state->mode_blob)) { 626 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", 627 crtc->base.id, crtc->name); 628 return -EINVAL; 629 } 630 631 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 632 WARN_ON(!state->enable && state->mode_blob)) { 633 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", 634 crtc->base.id, crtc->name); 635 return -EINVAL; 636 } 637 638 /* 639 * Reject event generation for when a CRTC is off and stays off. 640 * It wouldn't be hard to implement this, but userspace has a track 641 * record of happily burning through 100% cpu (or worse, crash) when the 642 * display pipe is suspended. To avoid all that fun just reject updates 643 * that ask for events since likely that indicates a bug in the 644 * compositor's drawing loop. This is consistent with the vblank IOCTL 645 * and legacy page_flip IOCTL which also reject service on a disabled 646 * pipe. 
647 */ 648 if (state->event && !state->active && !crtc->state->active) { 649 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", 650 crtc->base.id, crtc->name); 651 return -EINVAL; 652 } 653 654 return 0; 655 } 656 657 static void drm_atomic_crtc_print_state(struct drm_printer *p, 658 const struct drm_crtc_state *state) 659 { 660 struct drm_crtc *crtc = state->crtc; 661 662 drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); 663 drm_printf(p, "\tenable=%d\n", state->enable); 664 drm_printf(p, "\tactive=%d\n", state->active); 665 drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); 666 drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); 667 drm_printf(p, "\tactive_changed=%d\n", state->active_changed); 668 drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); 669 drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); 670 drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); 671 drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); 672 drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); 673 drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); 674 675 if (crtc->funcs->atomic_print_state) 676 crtc->funcs->atomic_print_state(p, state); 677 } 678 679 /** 680 * drm_atomic_get_plane_state - get plane state 681 * @state: global atomic state object 682 * @plane: plane to get state object for 683 * 684 * This function returns the plane state for the given plane, allocating it if 685 * needed. It will also grab the relevant plane lock to make sure that the state 686 * is consistent. 687 * 688 * Returns: 689 * 690 * Either the allocated state or the error code encoded into the pointer. When 691 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 692 * entire atomic sequence must be restarted. All other errors are fatal. 693 */ 694 struct drm_plane_state * 695 drm_atomic_get_plane_state(struct drm_atomic_state *state, 696 struct drm_plane *plane) 697 { 698 int ret, index = drm_plane_index(plane); 699 struct drm_plane_state *plane_state; 700 701 WARN_ON(!state->acquire_ctx); 702 703 plane_state = drm_atomic_get_existing_plane_state(state, plane); 704 if (plane_state) 705 return plane_state; 706 707 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); 708 if (ret) 709 return ERR_PTR(ret); 710 711 plane_state = plane->funcs->atomic_duplicate_state(plane); 712 if (!plane_state) 713 return ERR_PTR(-ENOMEM); 714 715 state->planes[index].state = plane_state; 716 state->planes[index].ptr = plane; 717 state->planes[index].old_state = plane->state; 718 state->planes[index].new_state = plane_state; 719 plane_state->state = state; 720 721 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", 722 plane->base.id, plane->name, plane_state, state); 723 724 if (plane_state->crtc) { 725 struct drm_crtc_state *crtc_state; 726 727 crtc_state = drm_atomic_get_crtc_state(state, 728 plane_state->crtc); 729 if (IS_ERR(crtc_state)) 730 return ERR_CAST(crtc_state); 731 } 732 733 return plane_state; 734 } 735 EXPORT_SYMBOL(drm_atomic_get_plane_state); 736 737 /** 738 * drm_atomic_plane_set_property - set property on plane 739 * @plane: the drm plane to set a property on 740 * @state: the state object to update with the new property value 741 * @property: the property to set 742 * @val: the new property value 743 * 744 * This function handles generic/core properties and calls out to driver's 745 * &drm_plane_funcs.atomic_set_property for driver properties. 
To ensure 746 * consistent behavior you must call this function rather than the driver hook 747 * directly. 748 * 749 * RETURNS: 750 * Zero on success, error code on failure 751 */ 752 static int drm_atomic_plane_set_property(struct drm_plane *plane, 753 struct drm_plane_state *state, struct drm_property *property, 754 uint64_t val) 755 { 756 struct drm_device *dev = plane->dev; 757 struct drm_mode_config *config = &dev->mode_config; 758 759 if (property == config->prop_fb_id) { 760 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); 761 drm_atomic_set_fb_for_plane(state, fb); 762 if (fb) 763 drm_framebuffer_put(fb); 764 } else if (property == config->prop_in_fence_fd) { 765 if (state->fence) 766 return -EINVAL; 767 768 if (U642I64(val) == -1) 769 return 0; 770 771 state->fence = sync_file_get_fence(val); 772 if (!state->fence) 773 return -EINVAL; 774 775 } else if (property == config->prop_crtc_id) { 776 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); 777 return drm_atomic_set_crtc_for_plane(state, crtc); 778 } else if (property == config->prop_crtc_x) { 779 state->crtc_x = U642I64(val); 780 } else if (property == config->prop_crtc_y) { 781 state->crtc_y = U642I64(val); 782 } else if (property == config->prop_crtc_w) { 783 state->crtc_w = val; 784 } else if (property == config->prop_crtc_h) { 785 state->crtc_h = val; 786 } else if (property == config->prop_src_x) { 787 state->src_x = val; 788 } else if (property == config->prop_src_y) { 789 state->src_y = val; 790 } else if (property == config->prop_src_w) { 791 state->src_w = val; 792 } else if (property == config->prop_src_h) { 793 state->src_h = val; 794 } else if (property == plane->alpha_property) { 795 state->alpha = val; 796 } else if (property == plane->rotation_property) { 797 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) 798 return -EINVAL; 799 state->rotation = val; 800 } else if (property == plane->zpos_property) { 801 state->zpos = val; 802 } else if (property == plane->color_encoding_property) { 803 state->color_encoding = val; 804 } else if (property == plane->color_range_property) { 805 state->color_range = val; 806 } else if (plane->funcs->atomic_set_property) { 807 return plane->funcs->atomic_set_property(plane, state, 808 property, val); 809 } else { 810 return -EINVAL; 811 } 812 813 return 0; 814 } 815 816 /** 817 * drm_atomic_plane_get_property - get property value from plane state 818 * @plane: the drm plane to set a property on 819 * @state: the state object to get the property value from 820 * @property: the property to set 821 * @val: return location for the property value 822 * 823 * This function handles generic/core properties and calls out to driver's 824 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure 825 * consistent behavior you must call this function rather than the driver hook 826 * directly. 827 * 828 * RETURNS: 829 * Zero on success, error code on failure 830 */ 831 static int 832 drm_atomic_plane_get_property(struct drm_plane *plane, 833 const struct drm_plane_state *state, 834 struct drm_property *property, uint64_t *val) 835 { 836 struct drm_device *dev = plane->dev; 837 struct drm_mode_config *config = &dev->mode_config; 838 839 if (property == config->prop_fb_id) { 840 *val = (state->fb) ? state->fb->base.id : 0; 841 } else if (property == config->prop_in_fence_fd) { 842 *val = -1; 843 } else if (property == config->prop_crtc_id) { 844 *val = (state->crtc) ? 
state->crtc->base.id : 0; 845 } else if (property == config->prop_crtc_x) { 846 *val = I642U64(state->crtc_x); 847 } else if (property == config->prop_crtc_y) { 848 *val = I642U64(state->crtc_y); 849 } else if (property == config->prop_crtc_w) { 850 *val = state->crtc_w; 851 } else if (property == config->prop_crtc_h) { 852 *val = state->crtc_h; 853 } else if (property == config->prop_src_x) { 854 *val = state->src_x; 855 } else if (property == config->prop_src_y) { 856 *val = state->src_y; 857 } else if (property == config->prop_src_w) { 858 *val = state->src_w; 859 } else if (property == config->prop_src_h) { 860 *val = state->src_h; 861 } else if (property == plane->alpha_property) { 862 *val = state->alpha; 863 } else if (property == plane->rotation_property) { 864 *val = state->rotation; 865 } else if (property == plane->zpos_property) { 866 *val = state->zpos; 867 } else if (property == plane->color_encoding_property) { 868 *val = state->color_encoding; 869 } else if (property == plane->color_range_property) { 870 *val = state->color_range; 871 } else if (plane->funcs->atomic_get_property) { 872 return plane->funcs->atomic_get_property(plane, state, property, val); 873 } else { 874 return -EINVAL; 875 } 876 877 return 0; 878 } 879 880 static bool 881 plane_switching_crtc(struct drm_atomic_state *state, 882 struct drm_plane *plane, 883 struct drm_plane_state *plane_state) 884 { 885 if (!plane->state->crtc || !plane_state->crtc) 886 return false; 887 888 if (plane->state->crtc == plane_state->crtc) 889 return false; 890 891 /* This could be refined, but currently there's no helper or driver code 892 * to implement direct switching of active planes nor userspace to take 893 * advantage of more direct plane switching without the intermediate 894 * full OFF state. 895 */ 896 return true; 897 } 898 899 /** 900 * drm_atomic_plane_check - check plane state 901 * @plane: plane to check 902 * @state: plane state to check 903 * 904 * Provides core sanity checks for plane state. 905 * 906 * RETURNS: 907 * Zero on success, error code on failure 908 */ 909 static int drm_atomic_plane_check(struct drm_plane *plane, 910 struct drm_plane_state *state) 911 { 912 unsigned int fb_width, fb_height; 913 int ret; 914 915 /* either *both* CRTC and FB must be set, or neither */ 916 if (state->crtc && !state->fb) { 917 DRM_DEBUG_ATOMIC("CRTC set but no FB\n"); 918 return -EINVAL; 919 } else if (state->fb && !state->crtc) { 920 DRM_DEBUG_ATOMIC("FB set but no CRTC\n"); 921 return -EINVAL; 922 } 923 924 /* if disabled, we don't care about the rest of the state: */ 925 if (!state->crtc) 926 return 0; 927 928 /* Check whether this plane is usable on this CRTC */ 929 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { 930 DRM_DEBUG_ATOMIC("Invalid crtc for plane\n"); 931 return -EINVAL; 932 } 933 934 /* Check whether this plane supports the fb pixel format. 
*/ 935 ret = drm_plane_check_pixel_format(plane, state->fb->format->format, 936 state->fb->modifier); 937 if (ret) { 938 struct drm_format_name_buf format_name; 939 DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n", 940 drm_get_format_name(state->fb->format->format, 941 &format_name), 942 state->fb->modifier); 943 return ret; 944 } 945 946 /* Give drivers some help against integer overflows */ 947 if (state->crtc_w > INT_MAX || 948 state->crtc_x > INT_MAX - (int32_t) state->crtc_w || 949 state->crtc_h > INT_MAX || 950 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { 951 DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n", 952 state->crtc_w, state->crtc_h, 953 state->crtc_x, state->crtc_y); 954 return -ERANGE; 955 } 956 957 fb_width = state->fb->width << 16; 958 fb_height = state->fb->height << 16; 959 960 /* Make sure source coordinates are inside the fb. */ 961 if (state->src_w > fb_width || 962 state->src_x > fb_width - state->src_w || 963 state->src_h > fb_height || 964 state->src_y > fb_height - state->src_h) { 965 DRM_DEBUG_ATOMIC("Invalid source coordinates " 966 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", 967 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, 968 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, 969 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, 970 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10, 971 state->fb->width, state->fb->height); 972 return -ENOSPC; 973 } 974 975 if (plane_switching_crtc(state->state, plane, state)) { 976 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", 977 plane->base.id, plane->name); 978 return -EINVAL; 979 } 980 981 return 0; 982 } 983 984 static void drm_atomic_plane_print_state(struct drm_printer *p, 985 const struct drm_plane_state *state) 986 { 987 struct drm_plane *plane = state->plane; 988 struct drm_rect src = drm_plane_state_src(state); 989 struct drm_rect dest = drm_plane_state_dest(state); 990 991 drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); 992 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 993 drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); 994 if (state->fb) 995 drm_framebuffer_print_info(p, 2, state->fb); 996 drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); 997 drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); 998 drm_printf(p, "\trotation=%x\n", state->rotation); 999 drm_printf(p, "\tcolor-encoding=%s\n", 1000 drm_get_color_encoding_name(state->color_encoding)); 1001 drm_printf(p, "\tcolor-range=%s\n", 1002 drm_get_color_range_name(state->color_range)); 1003 1004 if (plane->funcs->atomic_print_state) 1005 plane->funcs->atomic_print_state(p, state); 1006 } 1007 1008 /** 1009 * DOC: handling driver private state 1010 * 1011 * Very often the DRM objects exposed to userspace in the atomic modeset api 1012 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the 1013 * underlying hardware. Especially for any kind of shared resources (e.g. shared 1014 * clocks, scaler units, bandwidth and fifo limits shared among a group of 1015 * planes or CRTCs, and so on) it makes sense to model these as independent 1016 * objects. Drivers then need to do similar state tracking and commit ordering for 1017 * such private (since not exposed to userspace) objects as the atomic core and 1018 * helpers already provide for connectors, planes and CRTCs.
1019 * 1020 * To make this easier on drivers the atomic core provides some support to track 1021 * driver private state objects using struct &drm_private_obj, with the 1022 * associated state struct &drm_private_state. 1023 * 1024 * Similar to userspace-exposed objects, private state structures can be 1025 * acquired by calling drm_atomic_get_private_obj_state(). Since this function 1026 * does not take care of locking, drivers should wrap it for each type of 1027 * private state object they have with the required call to drm_modeset_lock() 1028 * for the corresponding &drm_modeset_lock. 1029 * 1030 * All private state structures contained in a &drm_atomic_state update can be 1031 * iterated using for_each_oldnew_private_obj_in_state(), 1032 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state(). 1033 * Drivers are recommended to wrap these for each type of driver private state 1034 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at 1035 * least if they want to iterate over all objects of a given type. 1036 * 1037 * An earlier way to handle driver private state was by subclassing struct 1038 * &drm_atomic_state. But since that encourages non-standard ways to implement 1039 * the check/commit split atomic requires (by using e.g. "check and rollback or 1040 * commit instead" of "duplicate state, check, then either commit or release 1041 * duplicated state") it is deprecated in favour of using &drm_private_state. 1042 */ 1043 1044 /** 1045 * drm_atomic_private_obj_init - initialize private object 1046 * @obj: private object 1047 * @state: initial private object state 1048 * @funcs: pointer to the struct of function pointers that identify the object 1049 * type 1050 * 1051 * Initialize the private object, which can be embedded into any 1052 * driver private object that needs its own atomic state. 1053 */ 1054 void 1055 drm_atomic_private_obj_init(struct drm_private_obj *obj, 1056 struct drm_private_state *state, 1057 const struct drm_private_state_funcs *funcs) 1058 { 1059 memset(obj, 0, sizeof(*obj)); 1060 1061 obj->state = state; 1062 obj->funcs = funcs; 1063 } 1064 EXPORT_SYMBOL(drm_atomic_private_obj_init); 1065 1066 /** 1067 * drm_atomic_private_obj_fini - finalize private object 1068 * @obj: private object 1069 * 1070 * Finalize the private object. 1071 */ 1072 void 1073 drm_atomic_private_obj_fini(struct drm_private_obj *obj) 1074 { 1075 obj->funcs->atomic_destroy_state(obj, obj->state); 1076 } 1077 EXPORT_SYMBOL(drm_atomic_private_obj_fini); 1078 1079 /** 1080 * drm_atomic_get_private_obj_state - get private object state 1081 * @state: global atomic state 1082 * @obj: private object to get the state for 1083 * 1084 * This function returns the private object state for the given private object, 1085 * allocating the state if needed. It does not grab any locks as the caller is 1086 * expected to take care of any required locking. 1087 * 1088 * RETURNS: 1089 * 1090 * Either the allocated state or the error code encoded into a pointer.
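 *
 * Drivers typically hide the required locking in a small wrapper; a rough
 * sketch, with all foo_* names made up for illustration:
 *
 *     struct drm_private_state *
 *     foo_get_shared_state(struct drm_atomic_state *state, struct foo_dev *foo)
 *     {
 *             int ret;
 *
 *             ret = drm_modeset_lock(&foo->lock, state->acquire_ctx);
 *             if (ret)
 *                     return ERR_PTR(ret);
 *
 *             return drm_atomic_get_private_obj_state(state, &foo->obj);
 *     }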
1091 */ 1092 struct drm_private_state * 1093 drm_atomic_get_private_obj_state(struct drm_atomic_state *state, 1094 struct drm_private_obj *obj) 1095 { 1096 int index, num_objs, i; 1097 size_t size; 1098 struct __drm_private_objs_state *arr; 1099 struct drm_private_state *obj_state; 1100 1101 for (i = 0; i < state->num_private_objs; i++) 1102 if (obj == state->private_objs[i].ptr) 1103 return state->private_objs[i].state; 1104 1105 num_objs = state->num_private_objs + 1; 1106 size = sizeof(*state->private_objs) * num_objs; 1107 arr = krealloc(state->private_objs, size, GFP_KERNEL); 1108 if (!arr) 1109 return ERR_PTR(-ENOMEM); 1110 1111 state->private_objs = arr; 1112 index = state->num_private_objs; 1113 memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); 1114 1115 obj_state = obj->funcs->atomic_duplicate_state(obj); 1116 if (!obj_state) 1117 return ERR_PTR(-ENOMEM); 1118 1119 state->private_objs[index].state = obj_state; 1120 state->private_objs[index].old_state = obj->state; 1121 state->private_objs[index].new_state = obj_state; 1122 state->private_objs[index].ptr = obj; 1123 1124 state->num_private_objs = num_objs; 1125 1126 DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", 1127 obj, obj_state, state); 1128 1129 return obj_state; 1130 } 1131 EXPORT_SYMBOL(drm_atomic_get_private_obj_state); 1132 1133 /** 1134 * drm_atomic_get_connector_state - get connector state 1135 * @state: global atomic state object 1136 * @connector: connector to get state object for 1137 * 1138 * This function returns the connector state for the given connector, 1139 * allocating it if needed. It will also grab the relevant connector lock to 1140 * make sure that the state is consistent. 1141 * 1142 * Returns: 1143 * 1144 * Either the allocated state or the error code encoded into the pointer. When 1145 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 1146 * entire atomic sequence must be restarted. All other errors are fatal. 
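 *
 * A short sketch of the usual call site in a driver's or helper's check code,
 * once @state has its acquire context set up:
 *
 *     struct drm_connector_state *conn_state;
 *
 *     conn_state = drm_atomic_get_connector_state(state, connector);
 *     if (IS_ERR(conn_state))
 *             return PTR_ERR(conn_state);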
1147 */ 1148 struct drm_connector_state * 1149 drm_atomic_get_connector_state(struct drm_atomic_state *state, 1150 struct drm_connector *connector) 1151 { 1152 int ret, index; 1153 struct drm_mode_config *config = &connector->dev->mode_config; 1154 struct drm_connector_state *connector_state; 1155 1156 WARN_ON(!state->acquire_ctx); 1157 1158 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); 1159 if (ret) 1160 return ERR_PTR(ret); 1161 1162 index = drm_connector_index(connector); 1163 1164 if (index >= state->num_connector) { 1165 struct __drm_connnectors_state *c; 1166 int alloc = max(index + 1, config->num_connector); 1167 1168 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); 1169 if (!c) 1170 return ERR_PTR(-ENOMEM); 1171 1172 state->connectors = c; 1173 memset(&state->connectors[state->num_connector], 0, 1174 sizeof(*state->connectors) * (alloc - state->num_connector)); 1175 1176 state->num_connector = alloc; 1177 } 1178 1179 if (state->connectors[index].state) 1180 return state->connectors[index].state; 1181 1182 connector_state = connector->funcs->atomic_duplicate_state(connector); 1183 if (!connector_state) 1184 return ERR_PTR(-ENOMEM); 1185 1186 drm_connector_get(connector); 1187 state->connectors[index].state = connector_state; 1188 state->connectors[index].old_state = connector->state; 1189 state->connectors[index].new_state = connector_state; 1190 state->connectors[index].ptr = connector; 1191 connector_state->state = state; 1192 1193 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n", 1194 connector->base.id, connector->name, 1195 connector_state, state); 1196 1197 if (connector_state->crtc) { 1198 struct drm_crtc_state *crtc_state; 1199 1200 crtc_state = drm_atomic_get_crtc_state(state, 1201 connector_state->crtc); 1202 if (IS_ERR(crtc_state)) 1203 return ERR_CAST(crtc_state); 1204 } 1205 1206 return connector_state; 1207 } 1208 EXPORT_SYMBOL(drm_atomic_get_connector_state); 1209 1210 /** 1211 * drm_atomic_connector_set_property - set property on connector. 1212 * @connector: the drm connector to set a property on 1213 * @state: the state object to update with the new property value 1214 * @property: the property to set 1215 * @val: the new property value 1216 * 1217 * This function handles generic/core properties and calls out to driver's 1218 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure 1219 * consistent behavior you must call this function rather than the driver hook 1220 * directly. 1221 * 1222 * RETURNS: 1223 * Zero on success, error code on failure 1224 */ 1225 static int drm_atomic_connector_set_property(struct drm_connector *connector, 1226 struct drm_connector_state *state, struct drm_property *property, 1227 uint64_t val) 1228 { 1229 struct drm_device *dev = connector->dev; 1230 struct drm_mode_config *config = &dev->mode_config; 1231 1232 if (property == config->prop_crtc_id) { 1233 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); 1234 return drm_atomic_set_crtc_for_connector(state, crtc); 1235 } else if (property == config->dpms_property) { 1236 /* setting DPMS property requires special handling, which 1237 * is done in legacy setprop path for us. Disallow (for 1238 * now?) 
atomic writes to DPMS property: 1239 */ 1240 return -EINVAL; 1241 } else if (property == config->tv_select_subconnector_property) { 1242 state->tv.subconnector = val; 1243 } else if (property == config->tv_left_margin_property) { 1244 state->tv.margins.left = val; 1245 } else if (property == config->tv_right_margin_property) { 1246 state->tv.margins.right = val; 1247 } else if (property == config->tv_top_margin_property) { 1248 state->tv.margins.top = val; 1249 } else if (property == config->tv_bottom_margin_property) { 1250 state->tv.margins.bottom = val; 1251 } else if (property == config->tv_mode_property) { 1252 state->tv.mode = val; 1253 } else if (property == config->tv_brightness_property) { 1254 state->tv.brightness = val; 1255 } else if (property == config->tv_contrast_property) { 1256 state->tv.contrast = val; 1257 } else if (property == config->tv_flicker_reduction_property) { 1258 state->tv.flicker_reduction = val; 1259 } else if (property == config->tv_overscan_property) { 1260 state->tv.overscan = val; 1261 } else if (property == config->tv_saturation_property) { 1262 state->tv.saturation = val; 1263 } else if (property == config->tv_hue_property) { 1264 state->tv.hue = val; 1265 } else if (property == config->link_status_property) { 1266 /* Never downgrade from GOOD to BAD on userspace's request here, 1267 * only hw issues can do that. 1268 * 1269 * For an atomic property the userspace doesn't need to be able 1270 * to understand all the properties, but needs to be able to 1271 * restore the state it wants on VT switch. So if the userspace 1272 * tries to change the link_status from GOOD to BAD, the driver 1273 * silently rejects it and returns 0. This prevents userspace 1274 * from accidentally breaking the display when it restores the 1275 * state. 1276 */ 1277 if (state->link_status != DRM_LINK_STATUS_GOOD) 1278 state->link_status = val; 1279 } else if (property == config->aspect_ratio_property) { 1280 state->picture_aspect_ratio = val; 1281 } else if (property == connector->scaling_mode_property) { 1282 state->scaling_mode = val; 1283 } else if (property == connector->content_protection_property) { 1284 if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 1285 DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); 1286 return -EINVAL; 1287 } 1288 state->content_protection = val; 1289 } else if (connector->funcs->atomic_set_property) { 1290 return connector->funcs->atomic_set_property(connector, 1291 state, property, val); 1292 } else { 1293 return -EINVAL; 1294 } 1295 1296 return 0; 1297 } 1298 1299 static void drm_atomic_connector_print_state(struct drm_printer *p, 1300 const struct drm_connector_state *state) 1301 { 1302 struct drm_connector *connector = state->connector; 1303 1304 drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); 1305 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 1306 1307 if (connector->funcs->atomic_print_state) 1308 connector->funcs->atomic_print_state(p, state); 1309 } 1310 1311 /** 1312 * drm_atomic_connector_get_property - get property value from connector state 1313 * @connector: the drm connector to set a property on 1314 * @state: the state object to get the property value from 1315 * @property: the property to set 1316 * @val: return location for the property value 1317 * 1318 * This function handles generic/core properties and calls out to driver's 1319 * &drm_connector_funcs.atomic_get_property for driver properties.
To ensure 1320 * consistent behavior you must call this function rather than the driver hook 1321 * directly. 1322 * 1323 * RETURNS: 1324 * Zero on success, error code on failure 1325 */ 1326 static int 1327 drm_atomic_connector_get_property(struct drm_connector *connector, 1328 const struct drm_connector_state *state, 1329 struct drm_property *property, uint64_t *val) 1330 { 1331 struct drm_device *dev = connector->dev; 1332 struct drm_mode_config *config = &dev->mode_config; 1333 1334 if (property == config->prop_crtc_id) { 1335 *val = (state->crtc) ? state->crtc->base.id : 0; 1336 } else if (property == config->dpms_property) { 1337 *val = connector->dpms; 1338 } else if (property == config->tv_select_subconnector_property) { 1339 *val = state->tv.subconnector; 1340 } else if (property == config->tv_left_margin_property) { 1341 *val = state->tv.margins.left; 1342 } else if (property == config->tv_right_margin_property) { 1343 *val = state->tv.margins.right; 1344 } else if (property == config->tv_top_margin_property) { 1345 *val = state->tv.margins.top; 1346 } else if (property == config->tv_bottom_margin_property) { 1347 *val = state->tv.margins.bottom; 1348 } else if (property == config->tv_mode_property) { 1349 *val = state->tv.mode; 1350 } else if (property == config->tv_brightness_property) { 1351 *val = state->tv.brightness; 1352 } else if (property == config->tv_contrast_property) { 1353 *val = state->tv.contrast; 1354 } else if (property == config->tv_flicker_reduction_property) { 1355 *val = state->tv.flicker_reduction; 1356 } else if (property == config->tv_overscan_property) { 1357 *val = state->tv.overscan; 1358 } else if (property == config->tv_saturation_property) { 1359 *val = state->tv.saturation; 1360 } else if (property == config->tv_hue_property) { 1361 *val = state->tv.hue; 1362 } else if (property == config->link_status_property) { 1363 *val = state->link_status; 1364 } else if (property == config->aspect_ratio_property) { 1365 *val = state->picture_aspect_ratio; 1366 } else if (property == connector->scaling_mode_property) { 1367 *val = state->scaling_mode; 1368 } else if (property == connector->content_protection_property) { 1369 *val = state->content_protection; 1370 } else if (connector->funcs->atomic_get_property) { 1371 return connector->funcs->atomic_get_property(connector, 1372 state, property, val); 1373 } else { 1374 return -EINVAL; 1375 } 1376 1377 return 0; 1378 } 1379 1380 int drm_atomic_get_property(struct drm_mode_object *obj, 1381 struct drm_property *property, uint64_t *val) 1382 { 1383 struct drm_device *dev = property->dev; 1384 int ret; 1385 1386 switch (obj->type) { 1387 case DRM_MODE_OBJECT_CONNECTOR: { 1388 struct drm_connector *connector = obj_to_connector(obj); 1389 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 1390 ret = drm_atomic_connector_get_property(connector, 1391 connector->state, property, val); 1392 break; 1393 } 1394 case DRM_MODE_OBJECT_CRTC: { 1395 struct drm_crtc *crtc = obj_to_crtc(obj); 1396 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 1397 ret = drm_atomic_crtc_get_property(crtc, 1398 crtc->state, property, val); 1399 break; 1400 } 1401 case DRM_MODE_OBJECT_PLANE: { 1402 struct drm_plane *plane = obj_to_plane(obj); 1403 WARN_ON(!drm_modeset_is_locked(&plane->mutex)); 1404 ret = drm_atomic_plane_get_property(plane, 1405 plane->state, property, val); 1406 break; 1407 } 1408 default: 1409 ret = -EINVAL; 1410 break; 1411 } 1412 1413 return ret; 1414 } 1415 1416 /** 1417 * drm_atomic_set_crtc_for_plane - 
set crtc for plane 1418 * @plane_state: the plane whose incoming state to update 1419 * @crtc: crtc to use for the plane 1420 * 1421 * Changing the assigned crtc for a plane requires us to grab the lock and state 1422 * for the new crtc, as needed. This function takes care of all these details 1423 * besides updating the pointer in the state object itself. 1424 * 1425 * Returns: 1426 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1427 * then the w/w mutex code has detected a deadlock and the entire atomic 1428 * sequence must be restarted. All other errors are fatal. 1429 */ 1430 int 1431 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, 1432 struct drm_crtc *crtc) 1433 { 1434 struct drm_plane *plane = plane_state->plane; 1435 struct drm_crtc_state *crtc_state; 1436 /* Nothing to do for same crtc */ 1437 if (plane_state->crtc == crtc) 1438 return 0; 1439 if (plane_state->crtc) { 1440 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1441 plane_state->crtc); 1442 if (WARN_ON(IS_ERR(crtc_state))) 1443 return PTR_ERR(crtc_state); 1444 1445 crtc_state->plane_mask &= ~(1 << drm_plane_index(plane)); 1446 } 1447 1448 plane_state->crtc = crtc; 1449 1450 if (crtc) { 1451 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1452 crtc); 1453 if (IS_ERR(crtc_state)) 1454 return PTR_ERR(crtc_state); 1455 crtc_state->plane_mask |= (1 << drm_plane_index(plane)); 1456 } 1457 1458 if (crtc) 1459 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n", 1460 plane_state, crtc->base.id, crtc->name); 1461 else 1462 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", 1463 plane_state); 1464 1465 return 0; 1466 } 1467 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); 1468 1469 /** 1470 * drm_atomic_set_fb_for_plane - set framebuffer for plane 1471 * @plane_state: atomic state object for the plane 1472 * @fb: fb to use for the plane 1473 * 1474 * Changing the assigned framebuffer for a plane requires us to grab a reference 1475 * to the new fb and drop the reference to the old fb, if there is one. This 1476 * function takes care of all these details besides updating the pointer in the 1477 * state object itself. 1478 */ 1479 void 1480 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, 1481 struct drm_framebuffer *fb) 1482 { 1483 if (fb) 1484 DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n", 1485 fb->base.id, plane_state); 1486 else 1487 DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n", 1488 plane_state); 1489 1490 drm_framebuffer_assign(&plane_state->fb, fb); 1491 } 1492 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); 1493 1494 /** 1495 * drm_atomic_set_fence_for_plane - set fence for plane 1496 * @plane_state: atomic state object for the plane 1497 * @fence: dma_fence to use for the plane 1498 * 1499 * Helper to set up the plane_state fence in case it is not set yet. 1500 * By using this, drivers don't need to worry whether the user chose 1501 * implicit or explicit fencing. 1502 * 1503 * This function will not set the fence to the state if it was set 1504 * via explicit fencing interfaces on the atomic ioctl. In that case it will 1505 * drop the reference to the fence as we are not storing it anywhere. 1506 * Otherwise, if &drm_plane_state.fence is not set, this function will set it 1507 * with the received implicit fence. In both cases this function consumes a 1508 * reference for @fence.
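 *
 * A rough sketch of the usual call site, a driver's
 * &drm_plane_helper_funcs.prepare_fb hook; here resv stands in for the
 * buffer's &reservation_object, which is looked up in a driver-specific way,
 * and new_state for the plane state passed to prepare_fb:
 *
 *     struct dma_fence *fence;
 *
 *     fence = reservation_object_get_excl_rcu(resv);
 *     drm_atomic_set_fence_for_plane(new_state, fence);
 *     return 0;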
1509 * 1510 * This way explicit fencing can be used to overrule implicit fencing, which is 1511 * important to make explicit fencing use-cases work: One example is using one 1512 * buffer for 2 screens with different refresh rates. Implicit fencing will 1513 * clamp rendering to the refresh rate of the slower screen, whereas explicit 1514 * fencing allows 2 independent render and display loops on a single buffer. If a 1515 * driver obeys both implicit and explicit fences for plane updates, then 1516 * it will break all the benefits of explicit fencing. 1517 */ 1518 void 1519 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, 1520 struct dma_fence *fence) 1521 { 1522 if (plane_state->fence) { 1523 dma_fence_put(fence); 1524 return; 1525 } 1526 1527 plane_state->fence = fence; 1528 } 1529 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane); 1530 1531 /** 1532 * drm_atomic_set_crtc_for_connector - set crtc for connector 1533 * @conn_state: atomic state object for the connector 1534 * @crtc: crtc to use for the connector 1535 * 1536 * Changing the assigned crtc for a connector requires us to grab the lock and 1537 * state for the new crtc, as needed. This function takes care of all these 1538 * details besides updating the pointer in the state object itself. 1539 * 1540 * Returns: 1541 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1542 * then the w/w mutex code has detected a deadlock and the entire atomic 1543 * sequence must be restarted. All other errors are fatal. 1544 */ 1545 int 1546 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, 1547 struct drm_crtc *crtc) 1548 { 1549 struct drm_crtc_state *crtc_state; 1550 1551 if (conn_state->crtc == crtc) 1552 return 0; 1553 1554 if (conn_state->crtc) { 1555 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, 1556 conn_state->crtc); 1557 1558 crtc_state->connector_mask &= 1559 ~(1 << drm_connector_index(conn_state->connector)); 1560 1561 drm_connector_put(conn_state->connector); 1562 conn_state->crtc = NULL; 1563 } 1564 1565 if (crtc) { 1566 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); 1567 if (IS_ERR(crtc_state)) 1568 return PTR_ERR(crtc_state); 1569 1570 crtc_state->connector_mask |= 1571 1 << drm_connector_index(conn_state->connector); 1572 1573 drm_connector_get(conn_state->connector); 1574 conn_state->crtc = crtc; 1575 1576 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n", 1577 conn_state, crtc->base.id, crtc->name); 1578 } else { 1579 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", 1580 conn_state); 1581 } 1582 1583 return 0; 1584 } 1585 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); 1586 1587 /** 1588 * drm_atomic_add_affected_connectors - add connectors for crtc 1589 * @state: atomic state 1590 * @crtc: DRM crtc 1591 * 1592 * This function walks the current configuration and adds all connectors 1593 * currently using @crtc to the atomic configuration @state. Note that this 1594 * function must acquire the connection mutex. This can potentially cause 1595 * unneeded serialization if the update is just for the planes on one crtc. Hence 1596 * drivers and helpers should only call this when really needed (e.g. when a 1597 * full modeset needs to happen due to some change). 1598 * 1599 * Returns: 1600 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1601 * then the w/w mutex code has detected a deadlock and the entire atomic 1602 * sequence must be restarted. All other errors are fatal.
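 *
 * A minimal sketch of the usual call site, e.g. in check code once a full
 * modeset on @crtc has been detected:
 *
 *     ret = drm_atomic_add_affected_connectors(state, crtc);
 *     if (ret)
 *             return ret;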
1603 */ 1604 int 1605 drm_atomic_add_affected_connectors(struct drm_atomic_state *state, 1606 struct drm_crtc *crtc) 1607 { 1608 struct drm_mode_config *config = &state->dev->mode_config; 1609 struct drm_connector *connector; 1610 struct drm_connector_state *conn_state; 1611 struct drm_connector_list_iter conn_iter; 1612 struct drm_crtc_state *crtc_state; 1613 int ret; 1614 1615 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1616 if (IS_ERR(crtc_state)) 1617 return PTR_ERR(crtc_state); 1618 1619 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); 1620 if (ret) 1621 return ret; 1622 1623 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n", 1624 crtc->base.id, crtc->name, state); 1625 1626 /* 1627 * Changed connectors are already in @state, so only need to look 1628 * at the connector_mask in crtc_state. 1629 */ 1630 drm_connector_list_iter_begin(state->dev, &conn_iter); 1631 drm_for_each_connector_iter(connector, &conn_iter) { 1632 if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector)))) 1633 continue; 1634 1635 conn_state = drm_atomic_get_connector_state(state, connector); 1636 if (IS_ERR(conn_state)) { 1637 drm_connector_list_iter_end(&conn_iter); 1638 return PTR_ERR(conn_state); 1639 } 1640 } 1641 drm_connector_list_iter_end(&conn_iter); 1642 1643 return 0; 1644 } 1645 EXPORT_SYMBOL(drm_atomic_add_affected_connectors); 1646 1647 /** 1648 * drm_atomic_add_affected_planes - add planes for crtc 1649 * @state: atomic state 1650 * @crtc: DRM crtc 1651 * 1652 * This function walks the current configuration and adds all planes 1653 * currently used by @crtc to the atomic configuration @state. This is useful 1654 * when an atomic commit also needs to check all currently enabled planes on 1655 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC 1656 * to avoid special code to force-enable all planes. 1657 * 1658 * Since acquiring a plane state will always also acquire the w/w mutex of the 1659 * current CRTC for that plane (if there is any), adding all the plane states for 1660 * a CRTC will not reduce parallelism of atomic updates. 1661 * 1662 * Returns: 1663 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1664 * then the w/w mutex code has detected a deadlock and the entire atomic 1665 * sequence must be restarted. All other errors are fatal. 1666 */ 1667 int 1668 drm_atomic_add_affected_planes(struct drm_atomic_state *state, 1669 struct drm_crtc *crtc) 1670 { 1671 struct drm_plane *plane; 1672 1673 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); 1674 1675 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 1676 struct drm_plane_state *plane_state = 1677 drm_atomic_get_plane_state(state, plane); 1678 1679 if (IS_ERR(plane_state)) 1680 return PTR_ERR(plane_state); 1681 } 1682 return 0; 1683 } 1684 EXPORT_SYMBOL(drm_atomic_add_affected_planes); 1685 1686 /** 1687 * drm_atomic_check_only - check whether a given config would work 1688 * @state: atomic configuration to check 1689 * 1690 * Note that this function can return -EDEADLK if the driver needed to acquire 1691 * more locks but encountered a deadlock. The caller must then do the usual w/w 1692 * backoff dance and restart. All other errors are fatal. 1693 * 1694 * Returns: 1695 * 0 on success, negative error code on failure.
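 *
 * In essence this is what the atomic IOCTL uses to implement the
 * DRM_MODE_ATOMIC_TEST_ONLY flag; a simplified sketch, with arg standing in
 * for the IOCTL argument:
 *
 *     if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
 *             ret = drm_atomic_check_only(state);
 *     else
 *             ret = drm_atomic_commit(state);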
1696 */ 1697 int drm_atomic_check_only(struct drm_atomic_state *state) 1698 { 1699 struct drm_device *dev = state->dev; 1700 struct drm_mode_config *config = &dev->mode_config; 1701 struct drm_plane *plane; 1702 struct drm_plane_state *plane_state; 1703 struct drm_crtc *crtc; 1704 struct drm_crtc_state *crtc_state; 1705 int i, ret = 0; 1706 1707 DRM_DEBUG_ATOMIC("checking %p\n", state); 1708 1709 for_each_new_plane_in_state(state, plane, plane_state, i) { 1710 ret = drm_atomic_plane_check(plane, plane_state); 1711 if (ret) { 1712 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n", 1713 plane->base.id, plane->name); 1714 return ret; 1715 } 1716 } 1717 1718 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1719 ret = drm_atomic_crtc_check(crtc, crtc_state); 1720 if (ret) { 1721 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n", 1722 crtc->base.id, crtc->name); 1723 return ret; 1724 } 1725 } 1726 1727 if (config->funcs->atomic_check) { 1728 ret = config->funcs->atomic_check(state->dev, state); 1729 1730 if (ret) { 1731 DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n", 1732 state, ret); 1733 return ret; 1734 } 1735 } 1736 1737 if (!state->allow_modeset) { 1738 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1739 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1740 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n", 1741 crtc->base.id, crtc->name); 1742 return -EINVAL; 1743 } 1744 } 1745 } 1746 1747 return 0; 1748 } 1749 EXPORT_SYMBOL(drm_atomic_check_only); 1750 1751 /** 1752 * drm_atomic_commit - commit configuration atomically 1753 * @state: atomic configuration to commit 1754 * 1755 * Note that this function can return -EDEADLK if the driver needed to acquire 1756 * more locks but encountered a deadlock. The caller must then do the usual w/w 1757 * backoff dance and restart. All other errors are fatal. 1758 * 1759 * This function will take its own reference on @state. 1760 * Callers should always release their reference with drm_atomic_state_put(). 1761 * 1762 * Returns: 1763 * 0 on success, negative error code on failure. 1764 */ 1765 int drm_atomic_commit(struct drm_atomic_state *state) 1766 { 1767 struct drm_mode_config *config = &state->dev->mode_config; 1768 int ret; 1769 1770 ret = drm_atomic_check_only(state); 1771 if (ret) 1772 return ret; 1773 1774 DRM_DEBUG_ATOMIC("committing %p\n", state); 1775 1776 return config->funcs->atomic_commit(state->dev, state, false); 1777 } 1778 EXPORT_SYMBOL(drm_atomic_commit); 1779 1780 /** 1781 * drm_atomic_nonblocking_commit - atomic nonblocking commit 1782 * @state: atomic configuration to commit 1783 * 1784 * Note that this function can return -EDEADLK if the driver needed to acquire 1785 * more locks but encountered a deadlock. The caller must then do the usual w/w 1786 * backoff dance and restart. All other errors are fatal. 1787 * 1788 * This function will take its own reference on @state. 1789 * Callers should always release their reference with drm_atomic_state_put(). 1790 * 1791 * Returns: 1792 * 0 on success, negative error code on failure.
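 *
 * A minimal sketch (hypothetical caller code); note that the caller still
 * owns and must drop its own reference afterwards::
 *
 *	ret = drm_atomic_nonblocking_commit(state);
 *	drm_atomic_state_put(state);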
1793 */ 1794 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) 1795 { 1796 struct drm_mode_config *config = &state->dev->mode_config; 1797 int ret; 1798 1799 ret = drm_atomic_check_only(state); 1800 if (ret) 1801 return ret; 1802 1803 DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state); 1804 1805 return config->funcs->atomic_commit(state->dev, state, true); 1806 } 1807 EXPORT_SYMBOL(drm_atomic_nonblocking_commit); 1808 1809 static void drm_atomic_print_state(const struct drm_atomic_state *state) 1810 { 1811 struct drm_printer p = drm_info_printer(state->dev->dev); 1812 struct drm_plane *plane; 1813 struct drm_plane_state *plane_state; 1814 struct drm_crtc *crtc; 1815 struct drm_crtc_state *crtc_state; 1816 struct drm_connector *connector; 1817 struct drm_connector_state *connector_state; 1818 int i; 1819 1820 DRM_DEBUG_ATOMIC("checking %p\n", state); 1821 1822 for_each_new_plane_in_state(state, plane, plane_state, i) 1823 drm_atomic_plane_print_state(&p, plane_state); 1824 1825 for_each_new_crtc_in_state(state, crtc, crtc_state, i) 1826 drm_atomic_crtc_print_state(&p, crtc_state); 1827 1828 for_each_new_connector_in_state(state, connector, connector_state, i) 1829 drm_atomic_connector_print_state(&p, connector_state); 1830 } 1831 1832 static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, 1833 bool take_locks) 1834 { 1835 struct drm_mode_config *config = &dev->mode_config; 1836 struct drm_plane *plane; 1837 struct drm_crtc *crtc; 1838 struct drm_connector *connector; 1839 struct drm_connector_list_iter conn_iter; 1840 1841 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 1842 return; 1843 1844 list_for_each_entry(plane, &config->plane_list, head) { 1845 if (take_locks) 1846 drm_modeset_lock(&plane->mutex, NULL); 1847 drm_atomic_plane_print_state(p, plane->state); 1848 if (take_locks) 1849 drm_modeset_unlock(&plane->mutex); 1850 } 1851 1852 list_for_each_entry(crtc, &config->crtc_list, head) { 1853 if (take_locks) 1854 drm_modeset_lock(&crtc->mutex, NULL); 1855 drm_atomic_crtc_print_state(p, crtc->state); 1856 if (take_locks) 1857 drm_modeset_unlock(&crtc->mutex); 1858 } 1859 1860 drm_connector_list_iter_begin(dev, &conn_iter); 1861 if (take_locks) 1862 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1863 drm_for_each_connector_iter(connector, &conn_iter) 1864 drm_atomic_connector_print_state(p, connector->state); 1865 if (take_locks) 1866 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1867 drm_connector_list_iter_end(&conn_iter); 1868 } 1869 1870 /** 1871 * drm_state_dump - dump entire device atomic state 1872 * @dev: the drm device 1873 * @p: where to print the state to 1874 * 1875 * Just for debugging. Drivers might want an option to dump state 1876 * to dmesg in case of error IRQs. (Hint, you probably want to 1877 * ratelimit this!) 1878 * 1879 * The caller must hold all modeset locks (drm_modeset_lock_all()), or, if this 1880 * is called from an error IRQ handler, the dump should not be enabled by default. 1881 * (I.e. if you are debugging errors you might not care that this 1882 * is racy, but calling this without all modeset locks held is 1883 * not inherently safe.)
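 *
 * A minimal sketch (hypothetical debug code, assuming the caller already
 * holds all modeset locks)::
 *
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	drm_state_dump(dev, &p);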
1884 */ 1885 void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 1886 { 1887 __drm_state_dump(dev, p, false); 1888 } 1889 EXPORT_SYMBOL(drm_state_dump); 1890 1891 #ifdef CONFIG_DEBUG_FS 1892 static int drm_state_info(struct seq_file *m, void *data) 1893 { 1894 struct drm_info_node *node = (struct drm_info_node *) m->private; 1895 struct drm_device *dev = node->minor->dev; 1896 struct drm_printer p = drm_seq_file_printer(m); 1897 1898 __drm_state_dump(dev, &p, true); 1899 1900 return 0; 1901 } 1902 1903 /* any use in debugfs files to dump individual planes/crtc/etc? */ 1904 static const struct drm_info_list drm_atomic_debugfs_list[] = { 1905 {"state", drm_state_info, 0}, 1906 }; 1907 1908 int drm_atomic_debugfs_init(struct drm_minor *minor) 1909 { 1910 return drm_debugfs_create_files(drm_atomic_debugfs_list, 1911 ARRAY_SIZE(drm_atomic_debugfs_list), 1912 minor->debugfs_root, minor); 1913 } 1914 #endif 1915 1916 /* 1917 * The big monster ioctl 1918 */ 1919 1920 static struct drm_pending_vblank_event *create_vblank_event( 1921 struct drm_crtc *crtc, uint64_t user_data) 1922 { 1923 struct drm_pending_vblank_event *e = NULL; 1924 1925 e = kzalloc(sizeof *e, GFP_KERNEL); 1926 if (!e) 1927 return NULL; 1928 1929 e->event.base.type = DRM_EVENT_FLIP_COMPLETE; 1930 e->event.base.length = sizeof(e->event); 1931 e->event.vbl.crtc_id = crtc->base.id; 1932 e->event.vbl.user_data = user_data; 1933 1934 return e; 1935 } 1936 1937 int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, 1938 struct drm_connector *connector, 1939 int mode) 1940 { 1941 struct drm_connector *tmp_connector; 1942 struct drm_connector_state *new_conn_state; 1943 struct drm_crtc *crtc; 1944 struct drm_crtc_state *crtc_state; 1945 int i, ret, old_mode = connector->dpms; 1946 bool active = false; 1947 1948 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, 1949 state->acquire_ctx); 1950 if (ret) 1951 return ret; 1952 1953 if (mode != DRM_MODE_DPMS_ON) 1954 mode = DRM_MODE_DPMS_OFF; 1955 connector->dpms = mode; 1956 1957 crtc = connector->state->crtc; 1958 if (!crtc) 1959 goto out; 1960 ret = drm_atomic_add_affected_connectors(state, crtc); 1961 if (ret) 1962 goto out; 1963 1964 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1965 if (IS_ERR(crtc_state)) { 1966 ret = PTR_ERR(crtc_state); 1967 goto out; 1968 } 1969 1970 for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { 1971 if (new_conn_state->crtc != crtc) 1972 continue; 1973 if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { 1974 active = true; 1975 break; 1976 } 1977 } 1978 1979 crtc_state->active = active; 1980 ret = drm_atomic_commit(state); 1981 out: 1982 if (ret != 0) 1983 connector->dpms = old_mode; 1984 return ret; 1985 } 1986 1987 int drm_atomic_set_property(struct drm_atomic_state *state, 1988 struct drm_mode_object *obj, 1989 struct drm_property *prop, 1990 uint64_t prop_value) 1991 { 1992 struct drm_mode_object *ref; 1993 int ret; 1994 1995 if (!drm_property_change_valid_get(prop, prop_value, &ref)) 1996 return -EINVAL; 1997 1998 switch (obj->type) { 1999 case DRM_MODE_OBJECT_CONNECTOR: { 2000 struct drm_connector *connector = obj_to_connector(obj); 2001 struct drm_connector_state *connector_state; 2002 2003 connector_state = drm_atomic_get_connector_state(state, connector); 2004 if (IS_ERR(connector_state)) { 2005 ret = PTR_ERR(connector_state); 2006 break; 2007 } 2008 2009 ret = drm_atomic_connector_set_property(connector, 2010 connector_state, prop, prop_value); 2011 break; 2012 } 2013 case 
DRM_MODE_OBJECT_CRTC: { 2014 struct drm_crtc *crtc = obj_to_crtc(obj); 2015 struct drm_crtc_state *crtc_state; 2016 2017 crtc_state = drm_atomic_get_crtc_state(state, crtc); 2018 if (IS_ERR(crtc_state)) { 2019 ret = PTR_ERR(crtc_state); 2020 break; 2021 } 2022 2023 ret = drm_atomic_crtc_set_property(crtc, 2024 crtc_state, prop, prop_value); 2025 break; 2026 } 2027 case DRM_MODE_OBJECT_PLANE: { 2028 struct drm_plane *plane = obj_to_plane(obj); 2029 struct drm_plane_state *plane_state; 2030 2031 plane_state = drm_atomic_get_plane_state(state, plane); 2032 if (IS_ERR(plane_state)) { 2033 ret = PTR_ERR(plane_state); 2034 break; 2035 } 2036 2037 ret = drm_atomic_plane_set_property(plane, 2038 plane_state, prop, prop_value); 2039 break; 2040 } 2041 default: 2042 ret = -EINVAL; 2043 break; 2044 } 2045 2046 drm_property_change_valid_put(prop, ref); 2047 return ret; 2048 } 2049 2050 /** 2051 * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers. 2052 * 2053 * @dev: drm device to check. 2054 * @plane_mask: plane mask for planes that were updated. 2055 * @ret: return value, can be -EDEADLK for a retry. 2056 * 2057 * Before doing an update, &drm_plane.old_fb is set to &drm_plane.fb, but before 2058 * dropping the locks, old_fb needs to be set to NULL and plane->fb updated. This 2059 * is a common operation for each atomic update, so this call is split off as a 2060 * helper. 2061 */ 2062 void drm_atomic_clean_old_fb(struct drm_device *dev, 2063 unsigned plane_mask, 2064 int ret) 2065 { 2066 struct drm_plane *plane; 2067 2068 /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping 2069 * locks (ie. while it is still safe to deref plane->state). We 2070 * need to do this here because the driver entry points cannot 2071 * distinguish between legacy and atomic ioctls. 2072 */ 2073 drm_for_each_plane_mask(plane, dev, plane_mask) { 2074 if (ret == 0) { 2075 struct drm_framebuffer *new_fb = plane->state->fb; 2076 if (new_fb) 2077 drm_framebuffer_get(new_fb); 2078 plane->fb = new_fb; 2079 plane->crtc = plane->state->crtc; 2080 2081 if (plane->old_fb) 2082 drm_framebuffer_put(plane->old_fb); 2083 } 2084 plane->old_fb = NULL; 2085 } 2086 } 2087 EXPORT_SYMBOL(drm_atomic_clean_old_fb); 2088 2089 /** 2090 * DOC: explicit fencing properties 2091 * 2092 * Explicit fencing allows userspace to control the buffer synchronization 2093 * between devices. A fence or a group of fences is transferred to/from 2094 * userspace using Sync File fds, and there are two DRM properties for that: 2095 * IN_FENCE_FD on each DRM Plane to send fences to the kernel, and 2096 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel. 2097 * 2098 * In contrast, with implicit fencing the kernel keeps track of any 2099 * ongoing rendering, and automatically ensures that the atomic update waits 2100 * for any pending rendering to complete. For shared buffers represented with 2101 * a &struct dma_buf this is tracked in &struct reservation_object. 2102 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org), 2103 * whereas explicit fencing is what Android wants. 2104 * 2105 * "IN_FENCE_FD": 2106 * Use this property to pass a fence that DRM should wait on before 2107 * proceeding with the Atomic Commit request and show the framebuffer for 2108 * the plane on the screen. The fence can be either a normal fence or a 2109 * merged one; the sync_file framework will handle both cases and use a 2110 * fence_array if a merged fence is received.
Passing -1 here means no 2111 * fences to wait on. 2112 * 2113 * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag, 2114 * it will only check if the Sync File is a valid one. 2115 * 2116 * On the driver side the fence is stored in the @fence parameter of 2117 * &struct drm_plane_state. Drivers which also support implicit fencing 2118 * should set the implicit fence using drm_atomic_set_fence_for_plane(), 2119 * to make sure there's consistent behaviour between drivers in precedence 2120 * of implicit vs. explicit fencing. 2121 * 2122 * "OUT_FENCE_PTR": 2123 * Use this property to pass a file descriptor pointer to DRM. Once the 2124 * Atomic Commit request call returns, OUT_FENCE_PTR will be filled with 2125 * the file descriptor number of a Sync File. This Sync File contains the 2126 * CRTC fence that will be signaled when all framebuffers present on the 2127 * Atomic Commit request for that given CRTC are scanned out on the 2128 * screen. 2129 * 2130 * The Atomic Commit request fails if an invalid pointer is passed. If the 2131 * Atomic Commit request fails for any other reason the out fence fd 2132 * returned will be -1. On an Atomic Commit with the 2133 * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. 2134 * 2135 * Note that out-fences don't have a special interface to drivers and are 2136 * internally represented by a &struct drm_pending_vblank_event in 2137 * &struct drm_crtc_state, which is also used by the nonblocking atomic commit 2138 * helpers and for the DRM event handling for existing userspace. 2139 */ 2140 2141 struct drm_out_fence_state { 2142 s32 __user *out_fence_ptr; 2143 struct sync_file *sync_file; 2144 int fd; 2145 }; 2146 2147 static int setup_out_fence(struct drm_out_fence_state *fence_state, 2148 struct dma_fence *fence) 2149 { 2150 fence_state->fd = get_unused_fd_flags(O_CLOEXEC); 2151 if (fence_state->fd < 0) 2152 return fence_state->fd; 2153 2154 if (put_user(fence_state->fd, fence_state->out_fence_ptr)) 2155 return -EFAULT; 2156 2157 fence_state->sync_file = sync_file_create(fence); 2158 if (!fence_state->sync_file) 2159 return -ENOMEM; 2160 2161 return 0; 2162 } 2163 2164 static int prepare_crtc_signaling(struct drm_device *dev, 2165 struct drm_atomic_state *state, 2166 struct drm_mode_atomic *arg, 2167 struct drm_file *file_priv, 2168 struct drm_out_fence_state **fence_state, 2169 unsigned int *num_fences) 2170 { 2171 struct drm_crtc *crtc; 2172 struct drm_crtc_state *crtc_state; 2173 int i, c = 0, ret; 2174 2175 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) 2176 return 0; 2177 2178 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2179 s32 __user *fence_ptr; 2180 2181 fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); 2182 2183 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { 2184 struct drm_pending_vblank_event *e; 2185 2186 e = create_vblank_event(crtc, arg->user_data); 2187 if (!e) 2188 return -ENOMEM; 2189 2190 crtc_state->event = e; 2191 } 2192 2193 if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { 2194 struct drm_pending_vblank_event *e = crtc_state->event; 2195 2196 if (!file_priv) 2197 continue; 2198 2199 ret = drm_event_reserve_init(dev, file_priv, &e->base, 2200 &e->event.base); 2201 if (ret) { 2202 kfree(e); 2203 crtc_state->event = NULL; 2204 return ret; 2205 } 2206 } 2207 2208 if (fence_ptr) { 2209 struct dma_fence *fence; 2210 struct drm_out_fence_state *f; 2211 2212 f = krealloc(*fence_state, sizeof(**fence_state) * 2213 (*num_fences + 1), GFP_KERNEL); 2214 if (!f) 2215 return -ENOMEM;
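			/* krealloc() does not zero the newly added element, so clear it before use. */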
2216 2217 memset(&f[*num_fences], 0, sizeof(*f)); 2218 2219 f[*num_fences].out_fence_ptr = fence_ptr; 2220 *fence_state = f; 2221 2222 fence = drm_crtc_create_fence(crtc); 2223 if (!fence) 2224 return -ENOMEM; 2225 2226 ret = setup_out_fence(&f[(*num_fences)++], fence); 2227 if (ret) { 2228 dma_fence_put(fence); 2229 return ret; 2230 } 2231 2232 crtc_state->event->base.fence = fence; 2233 } 2234 2235 c++; 2236 } 2237 2238 /* 2239 * Having this flag set means userspace is waiting for an event that will 2240 * never be delivered, since there is no CRTC in this update to signal it 2241 */ 2242 if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2243 return -EINVAL; 2244 2245 return 0; 2246 } 2247 2248 static void complete_crtc_signaling(struct drm_device *dev, 2249 struct drm_atomic_state *state, 2250 struct drm_out_fence_state *fence_state, 2251 unsigned int num_fences, 2252 bool install_fds) 2253 { 2254 struct drm_crtc *crtc; 2255 struct drm_crtc_state *crtc_state; 2256 int i; 2257 2258 if (install_fds) { 2259 for (i = 0; i < num_fences; i++) 2260 fd_install(fence_state[i].fd, 2261 fence_state[i].sync_file->file); 2262 2263 kfree(fence_state); 2264 return; 2265 } 2266 2267 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 2268 struct drm_pending_vblank_event *event = crtc_state->event; 2269 /* 2270 * Free the allocated event. drm_atomic_helper_setup_commit 2271 * can allocate an event too, so only free it if it's ours 2272 * to prevent a double free in drm_atomic_state_clear. 2273 */ 2274 if (event && (event->base.fence || event->base.file_priv)) { 2275 drm_event_cancel_free(dev, &event->base); 2276 crtc_state->event = NULL; 2277 } 2278 } 2279 2280 if (!fence_state) 2281 return; 2282 2283 for (i = 0; i < num_fences; i++) { 2284 if (fence_state[i].sync_file) 2285 fput(fence_state[i].sync_file->file); 2286 if (fence_state[i].fd >= 0) 2287 put_unused_fd(fence_state[i].fd); 2288 2289 /* If this fails, log an error to the user */ 2290 if (fence_state[i].out_fence_ptr && 2291 put_user(-1, fence_state[i].out_fence_ptr)) 2292 DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n"); 2293 } 2294 2295 kfree(fence_state); 2296 } 2297 2298 int drm_mode_atomic_ioctl(struct drm_device *dev, 2299 void *data, struct drm_file *file_priv) 2300 { 2301 struct drm_mode_atomic *arg = data; 2302 uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); 2303 uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); 2304 uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); 2305 uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); 2306 unsigned int copied_objs, copied_props; 2307 struct drm_atomic_state *state; 2308 struct drm_modeset_acquire_ctx ctx; 2309 struct drm_plane *plane; 2310 struct drm_out_fence_state *fence_state; 2311 unsigned plane_mask; 2312 int ret = 0; 2313 unsigned int i, j, num_fences; 2314 2315 /* disallow for drivers not supporting atomic: */ 2316 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2317 return -EINVAL; 2318 2319 /* disallow for userspace that has not enabled atomic cap (even 2320 * though this may be a bit overkill, since legacy userspace 2321 * wouldn't know how to call this ioctl) 2322 */ 2323 if (!file_priv->atomic) 2324 return -EINVAL; 2325 2326 if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) 2327 return -EINVAL; 2328 2329 if (arg->reserved) 2330 return -EINVAL; 2331 2332 if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) && 2333 !dev->mode_config.async_page_flip) 2334 return -EINVAL; 2335
2336 /* can't test and expect an event at the same time. */ 2337 if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && 2338 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 2339 return -EINVAL; 2340 2341 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2342 2343 state = drm_atomic_state_alloc(dev); 2344 if (!state) 2345 return -ENOMEM; 2346 2347 state->acquire_ctx = &ctx; 2348 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 2349 2350 retry: 2351 plane_mask = 0; 2352 copied_objs = 0; 2353 copied_props = 0; 2354 fence_state = NULL; 2355 num_fences = 0; 2356 2357 for (i = 0; i < arg->count_objs; i++) { 2358 uint32_t obj_id, count_props; 2359 struct drm_mode_object *obj; 2360 2361 if (get_user(obj_id, objs_ptr + copied_objs)) { 2362 ret = -EFAULT; 2363 goto out; 2364 } 2365 2366 obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); 2367 if (!obj) { 2368 ret = -ENOENT; 2369 goto out; 2370 } 2371 2372 if (!obj->properties) { 2373 drm_mode_object_put(obj); 2374 ret = -ENOENT; 2375 goto out; 2376 } 2377 2378 if (get_user(count_props, count_props_ptr + copied_objs)) { 2379 drm_mode_object_put(obj); 2380 ret = -EFAULT; 2381 goto out; 2382 } 2383 2384 copied_objs++; 2385 2386 for (j = 0; j < count_props; j++) { 2387 uint32_t prop_id; 2388 uint64_t prop_value; 2389 struct drm_property *prop; 2390 2391 if (get_user(prop_id, props_ptr + copied_props)) { 2392 drm_mode_object_put(obj); 2393 ret = -EFAULT; 2394 goto out; 2395 } 2396 2397 prop = drm_mode_obj_find_prop_id(obj, prop_id); 2398 if (!prop) { 2399 drm_mode_object_put(obj); 2400 ret = -ENOENT; 2401 goto out; 2402 } 2403 2404 if (copy_from_user(&prop_value, 2405 prop_values_ptr + copied_props, 2406 sizeof(prop_value))) { 2407 drm_mode_object_put(obj); 2408 ret = -EFAULT; 2409 goto out; 2410 } 2411 2412 ret = drm_atomic_set_property(state, obj, prop, 2413 prop_value); 2414 if (ret) { 2415 drm_mode_object_put(obj); 2416 goto out; 2417 } 2418 2419 copied_props++; 2420 } 2421 2422 if (obj->type == DRM_MODE_OBJECT_PLANE && count_props && 2423 !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) { 2424 plane = obj_to_plane(obj); 2425 plane_mask |= (1 << drm_plane_index(plane)); 2426 plane->old_fb = plane->fb; 2427 } 2428 drm_mode_object_put(obj); 2429 } 2430 2431 ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state, 2432 &num_fences); 2433 if (ret) 2434 goto out; 2435 2436 if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { 2437 ret = drm_atomic_check_only(state); 2438 } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { 2439 ret = drm_atomic_nonblocking_commit(state); 2440 } else { 2441 if (unlikely(drm_debug & DRM_UT_STATE)) 2442 drm_atomic_print_state(state); 2443 2444 ret = drm_atomic_commit(state); 2445 } 2446 2447 out: 2448 drm_atomic_clean_old_fb(dev, plane_mask, ret); 2449 2450 complete_crtc_signaling(dev, state, fence_state, num_fences, !ret); 2451 2452 if (ret == -EDEADLK) { 2453 drm_atomic_state_clear(state); 2454 ret = drm_modeset_backoff(&ctx); 2455 if (!ret) 2456 goto retry; 2457 } 2458 2459 drm_atomic_state_put(state); 2460 2461 drm_modeset_drop_locks(&ctx); 2462 drm_modeset_acquire_fini(&ctx); 2463 2464 return ret; 2465 } 2466