1 /* 2 * Copyright (C) 2014 Red Hat 3 * Copyright (C) 2014 Intel Corp. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice shall be included in 13 * all copies or substantial portions of the Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 * OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Rob Clark <robdclark@gmail.com> 25 * Daniel Vetter <daniel.vetter@ffwll.ch> 26 */ 27 28 29 #include <drm/drmP.h> 30 #include <drm/drm_atomic.h> 31 #include <drm/drm_mode.h> 32 #include <drm/drm_print.h> 33 #include <linux/sync_file.h> 34 35 #include "drm_crtc_internal.h" 36 #include "drm_internal.h" 37 38 void __drm_crtc_commit_free(struct kref *kref) 39 { 40 struct drm_crtc_commit *commit = 41 container_of(kref, struct drm_crtc_commit, ref); 42 43 kfree(commit); 44 } 45 EXPORT_SYMBOL(__drm_crtc_commit_free); 46 47 /** 48 * drm_atomic_state_default_release - 49 * release memory initialized by drm_atomic_state_init 50 * @state: atomic state 51 * 52 * Free all the memory allocated by drm_atomic_state_init. 53 * This should only be used by drivers which are still subclassing 54 * &drm_atomic_state and haven't switched to &drm_private_state yet. 55 */ 56 void drm_atomic_state_default_release(struct drm_atomic_state *state) 57 { 58 kfree(state->connectors); 59 kfree(state->crtcs); 60 kfree(state->planes); 61 kfree(state->private_objs); 62 } 63 EXPORT_SYMBOL(drm_atomic_state_default_release); 64 65 /** 66 * drm_atomic_state_init - init new atomic state 67 * @dev: DRM device 68 * @state: atomic state 69 * 70 * Default implementation for filling in a new atomic state. 71 * This should only be used by drivers which are still subclassing 72 * &drm_atomic_state and haven't switched to &drm_private_state yet. 73 */ 74 int 75 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) 76 { 77 kref_init(&state->ref); 78 79 /* TODO legacy paths should maybe do a better job about 80 * setting this appropriately? 81 */ 82 state->allow_modeset = true; 83 84 state->crtcs = kcalloc(dev->mode_config.num_crtc, 85 sizeof(*state->crtcs), GFP_KERNEL); 86 if (!state->crtcs) 87 goto fail; 88 state->planes = kcalloc(dev->mode_config.num_total_plane, 89 sizeof(*state->planes), GFP_KERNEL); 90 if (!state->planes) 91 goto fail; 92 93 state->dev = dev; 94 95 DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state); 96 97 return 0; 98 fail: 99 drm_atomic_state_default_release(state); 100 return -ENOMEM; 101 } 102 EXPORT_SYMBOL(drm_atomic_state_init); 103 104 /** 105 * drm_atomic_state_alloc - allocate atomic state 106 * @dev: DRM device 107 * 108 * This allocates an empty atomic state to track updates. 
109 */ 110 struct drm_atomic_state * 111 drm_atomic_state_alloc(struct drm_device *dev) 112 { 113 struct drm_mode_config *config = &dev->mode_config; 114 115 if (!config->funcs->atomic_state_alloc) { 116 struct drm_atomic_state *state; 117 118 state = kzalloc(sizeof(*state), GFP_KERNEL); 119 if (!state) 120 return NULL; 121 if (drm_atomic_state_init(dev, state) < 0) { 122 kfree(state); 123 return NULL; 124 } 125 return state; 126 } 127 128 return config->funcs->atomic_state_alloc(dev); 129 } 130 EXPORT_SYMBOL(drm_atomic_state_alloc); 131 132 /** 133 * drm_atomic_state_default_clear - clear base atomic state 134 * @state: atomic state 135 * 136 * Default implementation for clearing atomic state. 137 * This should only be used by drivers which are still subclassing 138 * &drm_atomic_state and haven't switched to &drm_private_state yet. 139 */ 140 void drm_atomic_state_default_clear(struct drm_atomic_state *state) 141 { 142 struct drm_device *dev = state->dev; 143 struct drm_mode_config *config = &dev->mode_config; 144 int i; 145 146 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); 147 148 for (i = 0; i < state->num_connector; i++) { 149 struct drm_connector *connector = state->connectors[i].ptr; 150 151 if (!connector) 152 continue; 153 154 connector->funcs->atomic_destroy_state(connector, 155 state->connectors[i].state); 156 state->connectors[i].ptr = NULL; 157 state->connectors[i].state = NULL; 158 state->connectors[i].old_state = NULL; 159 state->connectors[i].new_state = NULL; 160 drm_connector_put(connector); 161 } 162 163 for (i = 0; i < config->num_crtc; i++) { 164 struct drm_crtc *crtc = state->crtcs[i].ptr; 165 166 if (!crtc) 167 continue; 168 169 crtc->funcs->atomic_destroy_state(crtc, 170 state->crtcs[i].state); 171 172 state->crtcs[i].ptr = NULL; 173 state->crtcs[i].state = NULL; 174 state->crtcs[i].old_state = NULL; 175 state->crtcs[i].new_state = NULL; 176 } 177 178 for (i = 0; i < config->num_total_plane; i++) { 179 struct drm_plane *plane = state->planes[i].ptr; 180 181 if (!plane) 182 continue; 183 184 plane->funcs->atomic_destroy_state(plane, 185 state->planes[i].state); 186 state->planes[i].ptr = NULL; 187 state->planes[i].state = NULL; 188 state->planes[i].old_state = NULL; 189 state->planes[i].new_state = NULL; 190 } 191 192 for (i = 0; i < state->num_private_objs; i++) { 193 struct drm_private_obj *obj = state->private_objs[i].ptr; 194 195 obj->funcs->atomic_destroy_state(obj, 196 state->private_objs[i].state); 197 state->private_objs[i].ptr = NULL; 198 state->private_objs[i].state = NULL; 199 state->private_objs[i].old_state = NULL; 200 state->private_objs[i].new_state = NULL; 201 } 202 state->num_private_objs = 0; 203 204 if (state->fake_commit) { 205 drm_crtc_commit_put(state->fake_commit); 206 state->fake_commit = NULL; 207 } 208 } 209 EXPORT_SYMBOL(drm_atomic_state_default_clear); 210 211 /** 212 * drm_atomic_state_clear - clear state object 213 * @state: atomic state 214 * 215 * When the w/w mutex algorithm detects a deadlock we need to back off and drop 216 * all locks. So someone else could sneak in and change the current modeset 217 * configuration. Which means that all the state assembled in @state is no 218 * longer an atomic update to the current state, but to some arbitrary earlier 219 * state. Which could break assumptions the driver's 220 * &drm_mode_config_funcs.atomic_check likely relies on. 221 * 222 * Hence we must clear all cached state and completely start over, using this 223 * function. 
224 */ 225 void drm_atomic_state_clear(struct drm_atomic_state *state) 226 { 227 struct drm_device *dev = state->dev; 228 struct drm_mode_config *config = &dev->mode_config; 229 230 if (config->funcs->atomic_state_clear) 231 config->funcs->atomic_state_clear(state); 232 else 233 drm_atomic_state_default_clear(state); 234 } 235 EXPORT_SYMBOL(drm_atomic_state_clear); 236 237 /** 238 * __drm_atomic_state_free - free all memory for an atomic state 239 * @ref: This atomic state to deallocate 240 * 241 * This frees all memory associated with an atomic state, including all the 242 * per-object state for planes, crtcs and connectors. 243 */ 244 void __drm_atomic_state_free(struct kref *ref) 245 { 246 struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); 247 struct drm_mode_config *config = &state->dev->mode_config; 248 249 drm_atomic_state_clear(state); 250 251 DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); 252 253 if (config->funcs->atomic_state_free) { 254 config->funcs->atomic_state_free(state); 255 } else { 256 drm_atomic_state_default_release(state); 257 kfree(state); 258 } 259 } 260 EXPORT_SYMBOL(__drm_atomic_state_free); 261 262 /** 263 * drm_atomic_get_crtc_state - get crtc state 264 * @state: global atomic state object 265 * @crtc: crtc to get state object for 266 * 267 * This function returns the crtc state for the given crtc, allocating it if 268 * needed. It will also grab the relevant crtc lock to make sure that the state 269 * is consistent. 270 * 271 * Returns: 272 * 273 * Either the allocated state or the error code encoded into the pointer. When 274 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 275 * entire atomic sequence must be restarted. All other errors are fatal. 276 */ 277 struct drm_crtc_state * 278 drm_atomic_get_crtc_state(struct drm_atomic_state *state, 279 struct drm_crtc *crtc) 280 { 281 int ret, index = drm_crtc_index(crtc); 282 struct drm_crtc_state *crtc_state; 283 284 WARN_ON(!state->acquire_ctx); 285 286 crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); 287 if (crtc_state) 288 return crtc_state; 289 290 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); 291 if (ret) 292 return ERR_PTR(ret); 293 294 crtc_state = crtc->funcs->atomic_duplicate_state(crtc); 295 if (!crtc_state) 296 return ERR_PTR(-ENOMEM); 297 298 state->crtcs[index].state = crtc_state; 299 state->crtcs[index].old_state = crtc->state; 300 state->crtcs[index].new_state = crtc_state; 301 state->crtcs[index].ptr = crtc; 302 crtc_state->state = state; 303 304 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", 305 crtc->base.id, crtc->name, crtc_state, state); 306 307 return crtc_state; 308 } 309 EXPORT_SYMBOL(drm_atomic_get_crtc_state); 310 311 static void set_out_fence_for_crtc(struct drm_atomic_state *state, 312 struct drm_crtc *crtc, s32 __user *fence_ptr) 313 { 314 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; 315 } 316 317 static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, 318 struct drm_crtc *crtc) 319 { 320 s32 __user *fence_ptr; 321 322 fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; 323 state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; 324 325 return fence_ptr; 326 } 327 328 /** 329 * drm_atomic_set_mode_for_crtc - set mode for CRTC 330 * @state: the CRTC whose incoming state to update 331 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable 332 * 333 * Set a mode (originating from the kernel) on the desired CRTC state and update 334 * 
the enable property. 335 * 336 * RETURNS: 337 * Zero on success, error code on failure. Cannot return -EDEADLK. 338 */ 339 int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, 340 const struct drm_display_mode *mode) 341 { 342 struct drm_mode_modeinfo umode; 343 344 /* Early return for no change. */ 345 if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) 346 return 0; 347 348 drm_property_blob_put(state->mode_blob); 349 state->mode_blob = NULL; 350 351 if (mode) { 352 drm_mode_convert_to_umode(&umode, mode); 353 state->mode_blob = 354 drm_property_create_blob(state->crtc->dev, 355 sizeof(umode), 356 &umode); 357 if (IS_ERR(state->mode_blob)) 358 return PTR_ERR(state->mode_blob); 359 360 drm_mode_copy(&state->mode, mode); 361 state->enable = true; 362 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", 363 mode->name, state); 364 } else { 365 memset(&state->mode, 0, sizeof(state->mode)); 366 state->enable = false; 367 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", 368 state); 369 } 370 371 return 0; 372 } 373 EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc); 374 375 /** 376 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC 377 * @state: the CRTC whose incoming state to update 378 * @blob: pointer to blob property to use for mode 379 * 380 * Set a mode (originating from a blob property) on the desired CRTC state. 381 * This function will take a reference on the blob property for the CRTC state, 382 * and release the reference held on the state's existing mode property, if any 383 * was set. 384 * 385 * RETURNS: 386 * Zero on success, error code on failure. Cannot return -EDEADLK. 387 */ 388 int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, 389 struct drm_property_blob *blob) 390 { 391 if (blob == state->mode_blob) 392 return 0; 393 394 drm_property_blob_put(state->mode_blob); 395 state->mode_blob = NULL; 396 397 memset(&state->mode, 0, sizeof(state->mode)); 398 399 if (blob) { 400 if (blob->length != sizeof(struct drm_mode_modeinfo) || 401 drm_mode_convert_umode(state->crtc->dev, &state->mode, 402 blob->data)) 403 return -EINVAL; 404 405 state->mode_blob = drm_property_blob_get(blob); 406 state->enable = true; 407 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", 408 state->mode.name, state); 409 } else { 410 state->enable = false; 411 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", 412 state); 413 } 414 415 return 0; 416 } 417 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); 418 419 /** 420 * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it 421 * @dev: DRM device 422 * @blob: a pointer to the member blob to be replaced 423 * @blob_id: ID of the new blob 424 * @expected_size: total expected size of the blob data (in bytes) 425 * @expected_elem_size: expected element size of the blob data (in bytes) 426 * @replaced: did the blob get replaced? 427 * 428 * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero 429 * @blob becomes NULL. 430 * 431 * If @expected_size is positive the new blob length is expected to be equal 432 * to @expected_size bytes. If @expected_elem_size is positive the new blob 433 * length is expected to be a multiple of @expected_elem_size bytes. Otherwise 434 * an error is returned. 435 * 436 * @replaced will indicate to the caller whether the blob was replaced or not. 437 * If the old and new blobs were in fact the same blob @replaced will be false 438 * otherwise it will be true. 439 * 440 * RETURNS: 441 * Zero on success, error code on failure. 
442 */ 443 static int 444 drm_atomic_replace_property_blob_from_id(struct drm_device *dev, 445 struct drm_property_blob **blob, 446 uint64_t blob_id, 447 ssize_t expected_size, 448 ssize_t expected_elem_size, 449 bool *replaced) 450 { 451 struct drm_property_blob *new_blob = NULL; 452 453 if (blob_id != 0) { 454 new_blob = drm_property_lookup_blob(dev, blob_id); 455 if (new_blob == NULL) 456 return -EINVAL; 457 458 if (expected_size > 0 && 459 new_blob->length != expected_size) { 460 drm_property_blob_put(new_blob); 461 return -EINVAL; 462 } 463 if (expected_elem_size > 0 && 464 new_blob->length % expected_elem_size != 0) { 465 drm_property_blob_put(new_blob); 466 return -EINVAL; 467 } 468 } 469 470 *replaced |= drm_property_replace_blob(blob, new_blob); 471 drm_property_blob_put(new_blob); 472 473 return 0; 474 } 475 476 /** 477 * drm_atomic_crtc_set_property - set property on CRTC 478 * @crtc: the drm CRTC to set a property on 479 * @state: the state object to update with the new property value 480 * @property: the property to set 481 * @val: the new property value 482 * 483 * This function handles generic/core properties and calls out to driver's 484 * &drm_crtc_funcs.atomic_set_property for driver properties. To ensure 485 * consistent behavior you must call this function rather than the driver hook 486 * directly. 487 * 488 * RETURNS: 489 * Zero on success, error code on failure 490 */ 491 int drm_atomic_crtc_set_property(struct drm_crtc *crtc, 492 struct drm_crtc_state *state, struct drm_property *property, 493 uint64_t val) 494 { 495 struct drm_device *dev = crtc->dev; 496 struct drm_mode_config *config = &dev->mode_config; 497 bool replaced = false; 498 int ret; 499 500 if (property == config->prop_active) 501 state->active = val; 502 else if (property == config->prop_mode_id) { 503 struct drm_property_blob *mode = 504 drm_property_lookup_blob(dev, val); 505 ret = drm_atomic_set_mode_prop_for_crtc(state, mode); 506 drm_property_blob_put(mode); 507 return ret; 508 } else if (property == config->degamma_lut_property) { 509 ret = drm_atomic_replace_property_blob_from_id(dev, 510 &state->degamma_lut, 511 val, 512 -1, sizeof(struct drm_color_lut), 513 &replaced); 514 state->color_mgmt_changed |= replaced; 515 return ret; 516 } else if (property == config->ctm_property) { 517 ret = drm_atomic_replace_property_blob_from_id(dev, 518 &state->ctm, 519 val, 520 sizeof(struct drm_color_ctm), -1, 521 &replaced); 522 state->color_mgmt_changed |= replaced; 523 return ret; 524 } else if (property == config->gamma_lut_property) { 525 ret = drm_atomic_replace_property_blob_from_id(dev, 526 &state->gamma_lut, 527 val, 528 -1, sizeof(struct drm_color_lut), 529 &replaced); 530 state->color_mgmt_changed |= replaced; 531 return ret; 532 } else if (property == config->prop_out_fence_ptr) { 533 s32 __user *fence_ptr = u64_to_user_ptr(val); 534 535 if (!fence_ptr) 536 return 0; 537 538 if (put_user(-1, fence_ptr)) 539 return -EFAULT; 540 541 set_out_fence_for_crtc(state->state, crtc, fence_ptr); 542 } else if (crtc->funcs->atomic_set_property) 543 return crtc->funcs->atomic_set_property(crtc, state, property, val); 544 else 545 return -EINVAL; 546 547 return 0; 548 } 549 EXPORT_SYMBOL(drm_atomic_crtc_set_property); 550 551 /** 552 * drm_atomic_crtc_get_property - get property value from CRTC state 553 * @crtc: the drm CRTC to set a property on 554 * @state: the state object to get the property value from 555 * @property: the property to set 556 * @val: return location for the property value 557 * 558 * This 
function handles generic/core properties and calls out to driver's 559 * &drm_crtc_funcs.atomic_get_property for driver properties. To ensure 560 * consistent behavior you must call this function rather than the driver hook 561 * directly. 562 * 563 * RETURNS: 564 * Zero on success, error code on failure 565 */ 566 static int 567 drm_atomic_crtc_get_property(struct drm_crtc *crtc, 568 const struct drm_crtc_state *state, 569 struct drm_property *property, uint64_t *val) 570 { 571 struct drm_device *dev = crtc->dev; 572 struct drm_mode_config *config = &dev->mode_config; 573 574 if (property == config->prop_active) 575 *val = state->active; 576 else if (property == config->prop_mode_id) 577 *val = (state->mode_blob) ? state->mode_blob->base.id : 0; 578 else if (property == config->degamma_lut_property) 579 *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; 580 else if (property == config->ctm_property) 581 *val = (state->ctm) ? state->ctm->base.id : 0; 582 else if (property == config->gamma_lut_property) 583 *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0; 584 else if (property == config->prop_out_fence_ptr) 585 *val = 0; 586 else if (crtc->funcs->atomic_get_property) 587 return crtc->funcs->atomic_get_property(crtc, state, property, val); 588 else 589 return -EINVAL; 590 591 return 0; 592 } 593 594 /** 595 * drm_atomic_crtc_check - check crtc state 596 * @crtc: crtc to check 597 * @state: crtc state to check 598 * 599 * Provides core sanity checks for crtc state. 600 * 601 * RETURNS: 602 * Zero on success, error code on failure 603 */ 604 static int drm_atomic_crtc_check(struct drm_crtc *crtc, 605 struct drm_crtc_state *state) 606 { 607 /* NOTE: we explicitly don't enforce constraints such as primary 608 * layer covering entire screen, since that is something we want 609 * to allow (on hw that supports it). For hw that does not, it 610 * should be checked in driver's crtc->atomic_check() vfunc. 611 * 612 * TODO: Add generic modeset state checks once we support those. 613 */ 614 615 if (state->active && !state->enable) { 616 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n", 617 crtc->base.id, crtc->name); 618 return -EINVAL; 619 } 620 621 /* The state->enable vs. state->mode_blob checks can be WARN_ON, 622 * as this is a kernel-internal detail that userspace should never 623 * be able to trigger. */ 624 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 625 WARN_ON(state->enable && !state->mode_blob)) { 626 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n", 627 crtc->base.id, crtc->name); 628 return -EINVAL; 629 } 630 631 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 632 WARN_ON(!state->enable && state->mode_blob)) { 633 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n", 634 crtc->base.id, crtc->name); 635 return -EINVAL; 636 } 637 638 /* 639 * Reject event generation for when a CRTC is off and stays off. 640 * It wouldn't be hard to implement this, but userspace has a track 641 * record of happily burning through 100% cpu (or worse, crash) when the 642 * display pipe is suspended. To avoid all that fun just reject updates 643 * that ask for events since likely that indicates a bug in the 644 * compositor's drawing loop. This is consistent with the vblank IOCTL 645 * and legacy page_flip IOCTL which also reject service on a disabled 646 * pipe. 
647 */ 648 if (state->event && !state->active && !crtc->state->active) { 649 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n", 650 crtc->base.id, crtc->name); 651 return -EINVAL; 652 } 653 654 return 0; 655 } 656 657 static void drm_atomic_crtc_print_state(struct drm_printer *p, 658 const struct drm_crtc_state *state) 659 { 660 struct drm_crtc *crtc = state->crtc; 661 662 drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); 663 drm_printf(p, "\tenable=%d\n", state->enable); 664 drm_printf(p, "\tactive=%d\n", state->active); 665 drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); 666 drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); 667 drm_printf(p, "\tactive_changed=%d\n", state->active_changed); 668 drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); 669 drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); 670 drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); 671 drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); 672 drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); 673 drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); 674 675 if (crtc->funcs->atomic_print_state) 676 crtc->funcs->atomic_print_state(p, state); 677 } 678 679 /** 680 * drm_atomic_get_plane_state - get plane state 681 * @state: global atomic state object 682 * @plane: plane to get state object for 683 * 684 * This function returns the plane state for the given plane, allocating it if 685 * needed. It will also grab the relevant plane lock to make sure that the state 686 * is consistent. 687 * 688 * Returns: 689 * 690 * Either the allocated state or the error code encoded into the pointer. When 691 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 692 * entire atomic sequence must be restarted. All other errors are fatal. 693 */ 694 struct drm_plane_state * 695 drm_atomic_get_plane_state(struct drm_atomic_state *state, 696 struct drm_plane *plane) 697 { 698 int ret, index = drm_plane_index(plane); 699 struct drm_plane_state *plane_state; 700 701 WARN_ON(!state->acquire_ctx); 702 703 plane_state = drm_atomic_get_existing_plane_state(state, plane); 704 if (plane_state) 705 return plane_state; 706 707 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); 708 if (ret) 709 return ERR_PTR(ret); 710 711 plane_state = plane->funcs->atomic_duplicate_state(plane); 712 if (!plane_state) 713 return ERR_PTR(-ENOMEM); 714 715 state->planes[index].state = plane_state; 716 state->planes[index].ptr = plane; 717 state->planes[index].old_state = plane->state; 718 state->planes[index].new_state = plane_state; 719 plane_state->state = state; 720 721 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", 722 plane->base.id, plane->name, plane_state, state); 723 724 if (plane_state->crtc) { 725 struct drm_crtc_state *crtc_state; 726 727 crtc_state = drm_atomic_get_crtc_state(state, 728 plane_state->crtc); 729 if (IS_ERR(crtc_state)) 730 return ERR_CAST(crtc_state); 731 } 732 733 return plane_state; 734 } 735 EXPORT_SYMBOL(drm_atomic_get_plane_state); 736 737 /** 738 * drm_atomic_plane_set_property - set property on plane 739 * @plane: the drm plane to set a property on 740 * @state: the state object to update with the new property value 741 * @property: the property to set 742 * @val: the new property value 743 * 744 * This function handles generic/core properties and calls out to driver's 745 * &drm_plane_funcs.atomic_set_property for driver properties. 
To ensure 746 * consistent behavior you must call this function rather than the driver hook 747 * directly. 748 * 749 * RETURNS: 750 * Zero on success, error code on failure 751 */ 752 static int drm_atomic_plane_set_property(struct drm_plane *plane, 753 struct drm_plane_state *state, struct drm_property *property, 754 uint64_t val) 755 { 756 struct drm_device *dev = plane->dev; 757 struct drm_mode_config *config = &dev->mode_config; 758 759 if (property == config->prop_fb_id) { 760 struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); 761 drm_atomic_set_fb_for_plane(state, fb); 762 if (fb) 763 drm_framebuffer_put(fb); 764 } else if (property == config->prop_in_fence_fd) { 765 if (state->fence) 766 return -EINVAL; 767 768 if (U642I64(val) == -1) 769 return 0; 770 771 state->fence = sync_file_get_fence(val); 772 if (!state->fence) 773 return -EINVAL; 774 775 } else if (property == config->prop_crtc_id) { 776 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); 777 return drm_atomic_set_crtc_for_plane(state, crtc); 778 } else if (property == config->prop_crtc_x) { 779 state->crtc_x = U642I64(val); 780 } else if (property == config->prop_crtc_y) { 781 state->crtc_y = U642I64(val); 782 } else if (property == config->prop_crtc_w) { 783 state->crtc_w = val; 784 } else if (property == config->prop_crtc_h) { 785 state->crtc_h = val; 786 } else if (property == config->prop_src_x) { 787 state->src_x = val; 788 } else if (property == config->prop_src_y) { 789 state->src_y = val; 790 } else if (property == config->prop_src_w) { 791 state->src_w = val; 792 } else if (property == config->prop_src_h) { 793 state->src_h = val; 794 } else if (property == plane->rotation_property) { 795 if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) 796 return -EINVAL; 797 state->rotation = val; 798 } else if (property == plane->zpos_property) { 799 state->zpos = val; 800 } else if (property == plane->color_encoding_property) { 801 state->color_encoding = val; 802 } else if (property == plane->color_range_property) { 803 state->color_range = val; 804 } else if (plane->funcs->atomic_set_property) { 805 return plane->funcs->atomic_set_property(plane, state, 806 property, val); 807 } else { 808 return -EINVAL; 809 } 810 811 return 0; 812 } 813 814 /** 815 * drm_atomic_plane_get_property - get property value from plane state 816 * @plane: the drm plane to set a property on 817 * @state: the state object to get the property value from 818 * @property: the property to set 819 * @val: return location for the property value 820 * 821 * This function handles generic/core properties and calls out to driver's 822 * &drm_plane_funcs.atomic_get_property for driver properties. To ensure 823 * consistent behavior you must call this function rather than the driver hook 824 * directly. 825 * 826 * RETURNS: 827 * Zero on success, error code on failure 828 */ 829 static int 830 drm_atomic_plane_get_property(struct drm_plane *plane, 831 const struct drm_plane_state *state, 832 struct drm_property *property, uint64_t *val) 833 { 834 struct drm_device *dev = plane->dev; 835 struct drm_mode_config *config = &dev->mode_config; 836 837 if (property == config->prop_fb_id) { 838 *val = (state->fb) ? state->fb->base.id : 0; 839 } else if (property == config->prop_in_fence_fd) { 840 *val = -1; 841 } else if (property == config->prop_crtc_id) { 842 *val = (state->crtc) ? 
state->crtc->base.id : 0; 843 } else if (property == config->prop_crtc_x) { 844 *val = I642U64(state->crtc_x); 845 } else if (property == config->prop_crtc_y) { 846 *val = I642U64(state->crtc_y); 847 } else if (property == config->prop_crtc_w) { 848 *val = state->crtc_w; 849 } else if (property == config->prop_crtc_h) { 850 *val = state->crtc_h; 851 } else if (property == config->prop_src_x) { 852 *val = state->src_x; 853 } else if (property == config->prop_src_y) { 854 *val = state->src_y; 855 } else if (property == config->prop_src_w) { 856 *val = state->src_w; 857 } else if (property == config->prop_src_h) { 858 *val = state->src_h; 859 } else if (property == plane->rotation_property) { 860 *val = state->rotation; 861 } else if (property == plane->zpos_property) { 862 *val = state->zpos; 863 } else if (property == plane->color_encoding_property) { 864 *val = state->color_encoding; 865 } else if (property == plane->color_range_property) { 866 *val = state->color_range; 867 } else if (plane->funcs->atomic_get_property) { 868 return plane->funcs->atomic_get_property(plane, state, property, val); 869 } else { 870 return -EINVAL; 871 } 872 873 return 0; 874 } 875 876 static bool 877 plane_switching_crtc(struct drm_atomic_state *state, 878 struct drm_plane *plane, 879 struct drm_plane_state *plane_state) 880 { 881 if (!plane->state->crtc || !plane_state->crtc) 882 return false; 883 884 if (plane->state->crtc == plane_state->crtc) 885 return false; 886 887 /* This could be refined, but currently there's no helper or driver code 888 * to implement direct switching of active planes nor userspace to take 889 * advantage of more direct plane switching without the intermediate 890 * full OFF state. 891 */ 892 return true; 893 } 894 895 /** 896 * drm_atomic_plane_check - check plane state 897 * @plane: plane to check 898 * @state: plane state to check 899 * 900 * Provides core sanity checks for plane state. 901 * 902 * RETURNS: 903 * Zero on success, error code on failure 904 */ 905 static int drm_atomic_plane_check(struct drm_plane *plane, 906 struct drm_plane_state *state) 907 { 908 unsigned int fb_width, fb_height; 909 int ret; 910 911 /* either *both* CRTC and FB must be set, or neither */ 912 if (state->crtc && !state->fb) { 913 DRM_DEBUG_ATOMIC("CRTC set but no FB\n"); 914 return -EINVAL; 915 } else if (state->fb && !state->crtc) { 916 DRM_DEBUG_ATOMIC("FB set but no CRTC\n"); 917 return -EINVAL; 918 } 919 920 /* if disabled, we don't care about the rest of the state: */ 921 if (!state->crtc) 922 return 0; 923 924 /* Check whether this plane is usable on this CRTC */ 925 if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { 926 DRM_DEBUG_ATOMIC("Invalid crtc for plane\n"); 927 return -EINVAL; 928 } 929 930 /* Check whether this plane supports the fb pixel format. 
*/ 931 ret = drm_plane_check_pixel_format(plane, state->fb->format->format, 932 state->fb->modifier); 933 if (ret) { 934 struct drm_format_name_buf format_name; 935 DRM_DEBUG_ATOMIC("Invalid pixel format %s, modifier 0x%llx\n", 936 drm_get_format_name(state->fb->format->format, 937 &format_name), 938 state->fb->modifier); 939 return ret; 940 } 941 942 /* Give drivers some help against integer overflows */ 943 if (state->crtc_w > INT_MAX || 944 state->crtc_x > INT_MAX - (int32_t) state->crtc_w || 945 state->crtc_h > INT_MAX || 946 state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { 947 DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n", 948 state->crtc_w, state->crtc_h, 949 state->crtc_x, state->crtc_y); 950 return -ERANGE; 951 } 952 953 fb_width = state->fb->width << 16; 954 fb_height = state->fb->height << 16; 955 956 /* Make sure source coordinates are inside the fb. */ 957 if (state->src_w > fb_width || 958 state->src_x > fb_width - state->src_w || 959 state->src_h > fb_height || 960 state->src_y > fb_height - state->src_h) { 961 DRM_DEBUG_ATOMIC("Invalid source coordinates " 962 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", 963 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, 964 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, 965 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, 966 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10, 967 state->fb->width, state->fb->height); 968 return -ENOSPC; 969 } 970 971 if (plane_switching_crtc(state->state, plane, state)) { 972 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n", 973 plane->base.id, plane->name); 974 return -EINVAL; 975 } 976 977 return 0; 978 } 979 980 static void drm_atomic_plane_print_state(struct drm_printer *p, 981 const struct drm_plane_state *state) 982 { 983 struct drm_plane *plane = state->plane; 984 struct drm_rect src = drm_plane_state_src(state); 985 struct drm_rect dest = drm_plane_state_dest(state); 986 987 drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); 988 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 989 drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); 990 if (state->fb) 991 drm_framebuffer_print_info(p, 2, state->fb); 992 drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); 993 drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); 994 drm_printf(p, "\trotation=%x\n", state->rotation); 995 drm_printf(p, "\tcolor-encoding=%s\n", 996 drm_get_color_encoding_name(state->color_encoding)); 997 drm_printf(p, "\tcolor-range=%s\n", 998 drm_get_color_range_name(state->color_range)); 999 1000 if (plane->funcs->atomic_print_state) 1001 plane->funcs->atomic_print_state(p, state); 1002 } 1003 1004 /** 1005 * DOC: handling driver private state 1006 * 1007 * Very often the DRM objects exposed to userspace in the atomic modeset api 1008 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the 1009 * underlying hardware. Especially for any kind of shared resources (e.g. shared 1010 * clocks, scaler units, bandwidth and fifo limits shared among a group of 1011 * planes or CRTCs, and so on) it makes sense to model these as independent 1012 * objects. Drivers then need to do similar state tracking and commit ordering for 1013 * such private (since not exposed to userpace) objects as the atomic core and 1014 * helpers already provide for connectors, planes and CRTCs. 
1015 * 1016 * To make this easier on drivers the atomic core provides some support to track 1017 * driver private state objects using struct &drm_private_obj, with the 1018 * associated state struct &drm_private_state. 1019 * 1020 * Similar to userspace-exposed objects, private state structures can be 1021 * acquired by calling drm_atomic_get_private_obj_state(). Since this function 1022 * does not take care of locking, drivers should wrap it for each type of 1023 * private state object they have with the required call to drm_modeset_lock() 1024 * for the corresponding &drm_modeset_lock. 1025 * 1026 * All private state structures contained in a &drm_atomic_state update can be 1027 * iterated using for_each_oldnew_private_obj_in_state(), 1028 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state(). 1029 * Drivers are recommended to wrap these for each type of driver private state 1030 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at 1031 * least if they want to iterate over all objects of a given type. 1032 * 1033 * An earlier way to handle driver private state was by subclassing struct 1034 * &drm_atomic_state. But since that encourages non-standard ways to implement 1035 * the check/commit split atomic requires (by using e.g. "check and rollback or 1036 * commit instead" of "duplicate state, check, then either commit or release 1037 * duplicated state) it is deprecated in favour of using &drm_private_state. 1038 */ 1039 1040 /** 1041 * drm_atomic_private_obj_init - initialize private object 1042 * @obj: private object 1043 * @state: initial private object state 1044 * @funcs: pointer to the struct of function pointers that identify the object 1045 * type 1046 * 1047 * Initialize the private object, which can be embedded into any 1048 * driver private object that needs its own atomic state. 1049 */ 1050 void 1051 drm_atomic_private_obj_init(struct drm_private_obj *obj, 1052 struct drm_private_state *state, 1053 const struct drm_private_state_funcs *funcs) 1054 { 1055 memset(obj, 0, sizeof(*obj)); 1056 1057 obj->state = state; 1058 obj->funcs = funcs; 1059 } 1060 EXPORT_SYMBOL(drm_atomic_private_obj_init); 1061 1062 /** 1063 * drm_atomic_private_obj_fini - finalize private object 1064 * @obj: private object 1065 * 1066 * Finalize the private object. 1067 */ 1068 void 1069 drm_atomic_private_obj_fini(struct drm_private_obj *obj) 1070 { 1071 obj->funcs->atomic_destroy_state(obj, obj->state); 1072 } 1073 EXPORT_SYMBOL(drm_atomic_private_obj_fini); 1074 1075 /** 1076 * drm_atomic_get_private_obj_state - get private object state 1077 * @state: global atomic state 1078 * @obj: private object to get the state for 1079 * 1080 * This function returns the private object state for the given private object, 1081 * allocating the state if needed. It does not grab any locks as the caller is 1082 * expected to care of any required locking. 1083 * 1084 * RETURNS: 1085 * 1086 * Either the allocated state or the error code encoded into a pointer. 
1087 */ 1088 struct drm_private_state * 1089 drm_atomic_get_private_obj_state(struct drm_atomic_state *state, 1090 struct drm_private_obj *obj) 1091 { 1092 int index, num_objs, i; 1093 size_t size; 1094 struct __drm_private_objs_state *arr; 1095 struct drm_private_state *obj_state; 1096 1097 for (i = 0; i < state->num_private_objs; i++) 1098 if (obj == state->private_objs[i].ptr) 1099 return state->private_objs[i].state; 1100 1101 num_objs = state->num_private_objs + 1; 1102 size = sizeof(*state->private_objs) * num_objs; 1103 arr = krealloc(state->private_objs, size, GFP_KERNEL); 1104 if (!arr) 1105 return ERR_PTR(-ENOMEM); 1106 1107 state->private_objs = arr; 1108 index = state->num_private_objs; 1109 memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); 1110 1111 obj_state = obj->funcs->atomic_duplicate_state(obj); 1112 if (!obj_state) 1113 return ERR_PTR(-ENOMEM); 1114 1115 state->private_objs[index].state = obj_state; 1116 state->private_objs[index].old_state = obj->state; 1117 state->private_objs[index].new_state = obj_state; 1118 state->private_objs[index].ptr = obj; 1119 1120 state->num_private_objs = num_objs; 1121 1122 DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n", 1123 obj, obj_state, state); 1124 1125 return obj_state; 1126 } 1127 EXPORT_SYMBOL(drm_atomic_get_private_obj_state); 1128 1129 /** 1130 * drm_atomic_get_connector_state - get connector state 1131 * @state: global atomic state object 1132 * @connector: connector to get state object for 1133 * 1134 * This function returns the connector state for the given connector, 1135 * allocating it if needed. It will also grab the relevant connector lock to 1136 * make sure that the state is consistent. 1137 * 1138 * Returns: 1139 * 1140 * Either the allocated state or the error code encoded into the pointer. When 1141 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 1142 * entire atomic sequence must be restarted. All other errors are fatal. 
1143 */ 1144 struct drm_connector_state * 1145 drm_atomic_get_connector_state(struct drm_atomic_state *state, 1146 struct drm_connector *connector) 1147 { 1148 int ret, index; 1149 struct drm_mode_config *config = &connector->dev->mode_config; 1150 struct drm_connector_state *connector_state; 1151 1152 WARN_ON(!state->acquire_ctx); 1153 1154 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); 1155 if (ret) 1156 return ERR_PTR(ret); 1157 1158 index = drm_connector_index(connector); 1159 1160 if (index >= state->num_connector) { 1161 struct __drm_connnectors_state *c; 1162 int alloc = max(index + 1, config->num_connector); 1163 1164 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); 1165 if (!c) 1166 return ERR_PTR(-ENOMEM); 1167 1168 state->connectors = c; 1169 memset(&state->connectors[state->num_connector], 0, 1170 sizeof(*state->connectors) * (alloc - state->num_connector)); 1171 1172 state->num_connector = alloc; 1173 } 1174 1175 if (state->connectors[index].state) 1176 return state->connectors[index].state; 1177 1178 connector_state = connector->funcs->atomic_duplicate_state(connector); 1179 if (!connector_state) 1180 return ERR_PTR(-ENOMEM); 1181 1182 drm_connector_get(connector); 1183 state->connectors[index].state = connector_state; 1184 state->connectors[index].old_state = connector->state; 1185 state->connectors[index].new_state = connector_state; 1186 state->connectors[index].ptr = connector; 1187 connector_state->state = state; 1188 1189 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n", 1190 connector->base.id, connector->name, 1191 connector_state, state); 1192 1193 if (connector_state->crtc) { 1194 struct drm_crtc_state *crtc_state; 1195 1196 crtc_state = drm_atomic_get_crtc_state(state, 1197 connector_state->crtc); 1198 if (IS_ERR(crtc_state)) 1199 return ERR_CAST(crtc_state); 1200 } 1201 1202 return connector_state; 1203 } 1204 EXPORT_SYMBOL(drm_atomic_get_connector_state); 1205 1206 /** 1207 * drm_atomic_connector_set_property - set property on connector. 1208 * @connector: the drm connector to set a property on 1209 * @state: the state object to update with the new property value 1210 * @property: the property to set 1211 * @val: the new property value 1212 * 1213 * This function handles generic/core properties and calls out to driver's 1214 * &drm_connector_funcs.atomic_set_property for driver properties. To ensure 1215 * consistent behavior you must call this function rather than the driver hook 1216 * directly. 1217 * 1218 * RETURNS: 1219 * Zero on success, error code on failure 1220 */ 1221 static int drm_atomic_connector_set_property(struct drm_connector *connector, 1222 struct drm_connector_state *state, struct drm_property *property, 1223 uint64_t val) 1224 { 1225 struct drm_device *dev = connector->dev; 1226 struct drm_mode_config *config = &dev->mode_config; 1227 1228 if (property == config->prop_crtc_id) { 1229 struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val); 1230 return drm_atomic_set_crtc_for_connector(state, crtc); 1231 } else if (property == config->dpms_property) { 1232 /* setting DPMS property requires special handling, which 1233 * is done in legacy setprop path for us. Disallow (for 1234 * now?) 
atomic writes to DPMS property: 1235 */ 1236 return -EINVAL; 1237 } else if (property == config->tv_select_subconnector_property) { 1238 state->tv.subconnector = val; 1239 } else if (property == config->tv_left_margin_property) { 1240 state->tv.margins.left = val; 1241 } else if (property == config->tv_right_margin_property) { 1242 state->tv.margins.right = val; 1243 } else if (property == config->tv_top_margin_property) { 1244 state->tv.margins.top = val; 1245 } else if (property == config->tv_bottom_margin_property) { 1246 state->tv.margins.bottom = val; 1247 } else if (property == config->tv_mode_property) { 1248 state->tv.mode = val; 1249 } else if (property == config->tv_brightness_property) { 1250 state->tv.brightness = val; 1251 } else if (property == config->tv_contrast_property) { 1252 state->tv.contrast = val; 1253 } else if (property == config->tv_flicker_reduction_property) { 1254 state->tv.flicker_reduction = val; 1255 } else if (property == config->tv_overscan_property) { 1256 state->tv.overscan = val; 1257 } else if (property == config->tv_saturation_property) { 1258 state->tv.saturation = val; 1259 } else if (property == config->tv_hue_property) { 1260 state->tv.hue = val; 1261 } else if (property == config->link_status_property) { 1262 /* Never downgrade from GOOD to BAD on userspace's request here, 1263 * only hw issues can do that. 1264 * 1265 * For an atomic property the userspace doesn't need to be able 1266 * to understand all the properties, but needs to be able to 1267 * restore the state it wants on VT switch. So if the userspace 1268 * tries to change the link_status from GOOD to BAD, driver 1269 * silently rejects it and returns a 0. This prevents userspace 1270 * from accidently breaking the display when it restores the 1271 * state. 1272 */ 1273 if (state->link_status != DRM_LINK_STATUS_GOOD) 1274 state->link_status = val; 1275 } else if (property == config->aspect_ratio_property) { 1276 state->picture_aspect_ratio = val; 1277 } else if (property == connector->scaling_mode_property) { 1278 state->scaling_mode = val; 1279 } else if (property == connector->content_protection_property) { 1280 if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { 1281 DRM_DEBUG_KMS("only drivers can set CP Enabled\n"); 1282 return -EINVAL; 1283 } 1284 state->content_protection = val; 1285 } else if (connector->funcs->atomic_set_property) { 1286 return connector->funcs->atomic_set_property(connector, 1287 state, property, val); 1288 } else { 1289 return -EINVAL; 1290 } 1291 1292 return 0; 1293 } 1294 1295 static void drm_atomic_connector_print_state(struct drm_printer *p, 1296 const struct drm_connector_state *state) 1297 { 1298 struct drm_connector *connector = state->connector; 1299 1300 drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); 1301 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 1302 1303 if (connector->funcs->atomic_print_state) 1304 connector->funcs->atomic_print_state(p, state); 1305 } 1306 1307 /** 1308 * drm_atomic_connector_get_property - get property value from connector state 1309 * @connector: the drm connector to set a property on 1310 * @state: the state object to get the property value from 1311 * @property: the property to set 1312 * @val: return location for the property value 1313 * 1314 * This function handles generic/core properties and calls out to driver's 1315 * &drm_connector_funcs.atomic_get_property for driver properties. 
To ensure 1316 * consistent behavior you must call this function rather than the driver hook 1317 * directly. 1318 * 1319 * RETURNS: 1320 * Zero on success, error code on failure 1321 */ 1322 static int 1323 drm_atomic_connector_get_property(struct drm_connector *connector, 1324 const struct drm_connector_state *state, 1325 struct drm_property *property, uint64_t *val) 1326 { 1327 struct drm_device *dev = connector->dev; 1328 struct drm_mode_config *config = &dev->mode_config; 1329 1330 if (property == config->prop_crtc_id) { 1331 *val = (state->crtc) ? state->crtc->base.id : 0; 1332 } else if (property == config->dpms_property) { 1333 *val = connector->dpms; 1334 } else if (property == config->tv_select_subconnector_property) { 1335 *val = state->tv.subconnector; 1336 } else if (property == config->tv_left_margin_property) { 1337 *val = state->tv.margins.left; 1338 } else if (property == config->tv_right_margin_property) { 1339 *val = state->tv.margins.right; 1340 } else if (property == config->tv_top_margin_property) { 1341 *val = state->tv.margins.top; 1342 } else if (property == config->tv_bottom_margin_property) { 1343 *val = state->tv.margins.bottom; 1344 } else if (property == config->tv_mode_property) { 1345 *val = state->tv.mode; 1346 } else if (property == config->tv_brightness_property) { 1347 *val = state->tv.brightness; 1348 } else if (property == config->tv_contrast_property) { 1349 *val = state->tv.contrast; 1350 } else if (property == config->tv_flicker_reduction_property) { 1351 *val = state->tv.flicker_reduction; 1352 } else if (property == config->tv_overscan_property) { 1353 *val = state->tv.overscan; 1354 } else if (property == config->tv_saturation_property) { 1355 *val = state->tv.saturation; 1356 } else if (property == config->tv_hue_property) { 1357 *val = state->tv.hue; 1358 } else if (property == config->link_status_property) { 1359 *val = state->link_status; 1360 } else if (property == config->aspect_ratio_property) { 1361 *val = state->picture_aspect_ratio; 1362 } else if (property == connector->scaling_mode_property) { 1363 *val = state->scaling_mode; 1364 } else if (property == connector->content_protection_property) { 1365 *val = state->content_protection; 1366 } else if (connector->funcs->atomic_get_property) { 1367 return connector->funcs->atomic_get_property(connector, 1368 state, property, val); 1369 } else { 1370 return -EINVAL; 1371 } 1372 1373 return 0; 1374 } 1375 1376 int drm_atomic_get_property(struct drm_mode_object *obj, 1377 struct drm_property *property, uint64_t *val) 1378 { 1379 struct drm_device *dev = property->dev; 1380 int ret; 1381 1382 switch (obj->type) { 1383 case DRM_MODE_OBJECT_CONNECTOR: { 1384 struct drm_connector *connector = obj_to_connector(obj); 1385 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 1386 ret = drm_atomic_connector_get_property(connector, 1387 connector->state, property, val); 1388 break; 1389 } 1390 case DRM_MODE_OBJECT_CRTC: { 1391 struct drm_crtc *crtc = obj_to_crtc(obj); 1392 WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 1393 ret = drm_atomic_crtc_get_property(crtc, 1394 crtc->state, property, val); 1395 break; 1396 } 1397 case DRM_MODE_OBJECT_PLANE: { 1398 struct drm_plane *plane = obj_to_plane(obj); 1399 WARN_ON(!drm_modeset_is_locked(&plane->mutex)); 1400 ret = drm_atomic_plane_get_property(plane, 1401 plane->state, property, val); 1402 break; 1403 } 1404 default: 1405 ret = -EINVAL; 1406 break; 1407 } 1408 1409 return ret; 1410 } 1411 1412 /** 1413 * drm_atomic_set_crtc_for_plane - 
set crtc for plane 1414 * @plane_state: the plane whose incoming state to update 1415 * @crtc: crtc to use for the plane 1416 * 1417 * Changing the assigned crtc for a plane requires us to grab the lock and state 1418 * for the new crtc, as needed. This function takes care of all these details 1419 * besides updating the pointer in the state object itself. 1420 * 1421 * Returns: 1422 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1423 * then the w/w mutex code has detected a deadlock and the entire atomic 1424 * sequence must be restarted. All other errors are fatal. 1425 */ 1426 int 1427 drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, 1428 struct drm_crtc *crtc) 1429 { 1430 struct drm_plane *plane = plane_state->plane; 1431 struct drm_crtc_state *crtc_state; 1432 1433 if (plane_state->crtc) { 1434 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1435 plane_state->crtc); 1436 if (WARN_ON(IS_ERR(crtc_state))) 1437 return PTR_ERR(crtc_state); 1438 1439 crtc_state->plane_mask &= ~(1 << drm_plane_index(plane)); 1440 } 1441 1442 plane_state->crtc = crtc; 1443 1444 if (crtc) { 1445 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1446 crtc); 1447 if (IS_ERR(crtc_state)) 1448 return PTR_ERR(crtc_state); 1449 crtc_state->plane_mask |= (1 << drm_plane_index(plane)); 1450 } 1451 1452 if (crtc) 1453 DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n", 1454 plane_state, crtc->base.id, crtc->name); 1455 else 1456 DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", 1457 plane_state); 1458 1459 return 0; 1460 } 1461 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); 1462 1463 /** 1464 * drm_atomic_set_fb_for_plane - set framebuffer for plane 1465 * @plane_state: atomic state object for the plane 1466 * @fb: fb to use for the plane 1467 * 1468 * Changing the assigned framebuffer for a plane requires us to grab a reference 1469 * to the new fb and drop the reference to the old fb, if there is one. This 1470 * function takes care of all these details besides updating the pointer in the 1471 * state object itself. 1472 */ 1473 void 1474 drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, 1475 struct drm_framebuffer *fb) 1476 { 1477 if (fb) 1478 DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n", 1479 fb->base.id, plane_state); 1480 else 1481 DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n", 1482 plane_state); 1483 1484 drm_framebuffer_assign(&plane_state->fb, fb); 1485 } 1486 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); 1487 1488 /** 1489 * drm_atomic_set_fence_for_plane - set fence for plane 1490 * @plane_state: atomic state object for the plane 1491 * @fence: dma_fence to use for the plane 1492 * 1493 * Helper to setup the plane_state fence in case it is not set yet. 1494 * By using this drivers doesn't need to worry if the user choose 1495 * implicit or explicit fencing. 1496 * 1497 * This function will not set the fence to the state if it was set 1498 * via explicit fencing interfaces on the atomic ioctl. In that case it will 1499 * drop the reference to the fence as we are not storing it anywhere. 1500 * Otherwise, if &drm_plane_state.fence is not set this function we just set it 1501 * with the received implicit fence. In both cases this function consumes a 1502 * reference for @fence. 
1503 */ 1504 void 1505 drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state, 1506 struct dma_fence *fence) 1507 { 1508 if (plane_state->fence) { 1509 dma_fence_put(fence); 1510 return; 1511 } 1512 1513 plane_state->fence = fence; 1514 } 1515 EXPORT_SYMBOL(drm_atomic_set_fence_for_plane); 1516 1517 /** 1518 * drm_atomic_set_crtc_for_connector - set crtc for connector 1519 * @conn_state: atomic state object for the connector 1520 * @crtc: crtc to use for the connector 1521 * 1522 * Changing the assigned crtc for a connector requires us to grab the lock and 1523 * state for the new crtc, as needed. This function takes care of all these 1524 * details besides updating the pointer in the state object itself. 1525 * 1526 * Returns: 1527 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1528 * then the w/w mutex code has detected a deadlock and the entire atomic 1529 * sequence must be restarted. All other errors are fatal. 1530 */ 1531 int 1532 drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, 1533 struct drm_crtc *crtc) 1534 { 1535 struct drm_crtc_state *crtc_state; 1536 1537 if (conn_state->crtc == crtc) 1538 return 0; 1539 1540 if (conn_state->crtc) { 1541 crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, 1542 conn_state->crtc); 1543 1544 crtc_state->connector_mask &= 1545 ~(1 << drm_connector_index(conn_state->connector)); 1546 1547 drm_connector_put(conn_state->connector); 1548 conn_state->crtc = NULL; 1549 } 1550 1551 if (crtc) { 1552 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); 1553 if (IS_ERR(crtc_state)) 1554 return PTR_ERR(crtc_state); 1555 1556 crtc_state->connector_mask |= 1557 1 << drm_connector_index(conn_state->connector); 1558 1559 drm_connector_get(conn_state->connector); 1560 conn_state->crtc = crtc; 1561 1562 DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n", 1563 conn_state, crtc->base.id, crtc->name); 1564 } else { 1565 DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", 1566 conn_state); 1567 } 1568 1569 return 0; 1570 } 1571 EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); 1572 1573 /** 1574 * drm_atomic_add_affected_connectors - add connectors for crtc 1575 * @state: atomic state 1576 * @crtc: DRM crtc 1577 * 1578 * This function walks the current configuration and adds all connectors 1579 * currently using @crtc to the atomic configuration @state. Note that this 1580 * function must acquire the connection mutex. This can potentially cause 1581 * unneeded seralization if the update is just for the planes on one crtc. Hence 1582 * drivers and helpers should only call this when really needed (e.g. when a 1583 * full modeset needs to happen due to some change). 1584 * 1585 * Returns: 1586 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1587 * then the w/w mutex code has detected a deadlock and the entire atomic 1588 * sequence must be restarted. All other errors are fatal. 
1589 */ 1590 int 1591 drm_atomic_add_affected_connectors(struct drm_atomic_state *state, 1592 struct drm_crtc *crtc) 1593 { 1594 struct drm_mode_config *config = &state->dev->mode_config; 1595 struct drm_connector *connector; 1596 struct drm_connector_state *conn_state; 1597 struct drm_connector_list_iter conn_iter; 1598 struct drm_crtc_state *crtc_state; 1599 int ret; 1600 1601 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1602 if (IS_ERR(crtc_state)) 1603 return PTR_ERR(crtc_state); 1604 1605 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); 1606 if (ret) 1607 return ret; 1608 1609 DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n", 1610 crtc->base.id, crtc->name, state); 1611 1612 /* 1613 * Changed connectors are already in @state, so only need to look 1614 * at the connector_mask in crtc_state. 1615 */ 1616 drm_connector_list_iter_begin(state->dev, &conn_iter); 1617 drm_for_each_connector_iter(connector, &conn_iter) { 1618 if (!(crtc_state->connector_mask & (1 << drm_connector_index(connector)))) 1619 continue; 1620 1621 conn_state = drm_atomic_get_connector_state(state, connector); 1622 if (IS_ERR(conn_state)) { 1623 drm_connector_list_iter_end(&conn_iter); 1624 return PTR_ERR(conn_state); 1625 } 1626 } 1627 drm_connector_list_iter_end(&conn_iter); 1628 1629 return 0; 1630 } 1631 EXPORT_SYMBOL(drm_atomic_add_affected_connectors); 1632 1633 /** 1634 * drm_atomic_add_affected_planes - add planes for crtc 1635 * @state: atomic state 1636 * @crtc: DRM crtc 1637 * 1638 * This function walks the current configuration and adds all planes 1639 * currently used by @crtc to the atomic configuration @state. This is useful 1640 * when an atomic commit also needs to check all currently enabled plane on 1641 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC 1642 * to avoid special code to force-enable all planes. 1643 * 1644 * Since acquiring a plane state will always also acquire the w/w mutex of the 1645 * current CRTC for that plane (if there is any) adding all the plane states for 1646 * a CRTC will not reduce parallism of atomic updates. 1647 * 1648 * Returns: 1649 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK 1650 * then the w/w mutex code has detected a deadlock and the entire atomic 1651 * sequence must be restarted. All other errors are fatal. 1652 */ 1653 int 1654 drm_atomic_add_affected_planes(struct drm_atomic_state *state, 1655 struct drm_crtc *crtc) 1656 { 1657 struct drm_plane *plane; 1658 1659 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); 1660 1661 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { 1662 struct drm_plane_state *plane_state = 1663 drm_atomic_get_plane_state(state, plane); 1664 1665 if (IS_ERR(plane_state)) 1666 return PTR_ERR(plane_state); 1667 } 1668 return 0; 1669 } 1670 EXPORT_SYMBOL(drm_atomic_add_affected_planes); 1671 1672 /** 1673 * drm_atomic_check_only - check whether a given config would work 1674 * @state: atomic configuration to check 1675 * 1676 * Note that this function can return -EDEADLK if the driver needed to acquire 1677 * more locks but encountered a deadlock. The caller must then do the usual w/w 1678 * backoff dance and restart. All other errors are fatal. 1679 * 1680 * Returns: 1681 * 0 on success, negative error code on failure. 
/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check)
		ret = config->funcs->atomic_check(state->dev, state);

	if (ret)
		return ret;

	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);
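/*
 * Illustrative sketch, assuming a kernel-internal caller that built the state
 * itself (ctx being the caller's drm_modeset_acquire_ctx): the commit is
 * normally wrapped in the standard w/w backoff loop, mirroring what
 * drm_mode_atomic_ioctl() below does for userspace requests:
 *
 *	retry:
 *	ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		ret = drm_modeset_backoff(&ctx);
 *		if (!ret)
 *			goto retry;
 *	}
 *	drm_atomic_state_put(state);
 */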
/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
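/*
 * Illustrative sketch, mirroring drm_mode_atomic_ioctl() below: callers pick
 * between the check-only, nonblocking and blocking paths based on the request
 * flags,
 *
 *	if (flags & DRM_MODE_ATOMIC_TEST_ONLY)
 *		ret = drm_atomic_check_only(state);
 *	else if (flags & DRM_MODE_ATOMIC_NONBLOCK)
 *		ret = drm_atomic_nonblocking_commit(state);
 *	else
 *		ret = drm_atomic_commit(state);
 */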
static void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must hold all modeset locks, i.e. use drm_modeset_lock_all(). If
 * this is instead called from an error IRQ handler, it should not be enabled
 * by default. (I.e. if you are debugging errors you might not care that this
 * is racy, but calling this without all modeset locks held is not inherently
 * safe.)
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);
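/*
 * Illustrative sketch, assuming driver code that dumps state from an error
 * interrupt as suggested above; the dump is ratelimited so a storm of error
 * IRQs does not flood dmesg (rs is a hypothetical static ratelimit_state):
 *
 *	if (__ratelimit(&rs)) {
 *		struct drm_printer p = drm_info_printer(dev->dev);
 *
 *		drm_state_dump(dev, &p);
 *	}
 */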
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	__drm_state_dump(dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
					ARRAY_SIZE(drm_atomic_debugfs_list),
					minor->debugfs_root, minor);
}
#endif

/*
 * The big monster ioctl
 */

static struct drm_pending_vblank_event *create_vblank_event(
		struct drm_crtc *crtc, uint64_t user_data)
{
	struct drm_pending_vblank_event *e = NULL;

	e = kzalloc(sizeof *e, GFP_KERNEL);
	if (!e)
		return NULL;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);
	e->event.vbl.crtc_id = crtc->base.id;
	e->event.vbl.user_data = user_data;

	return e;
}

int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
				     struct drm_connector *connector,
				     int mode)
{
	struct drm_connector *tmp_connector;
	struct drm_connector_state *new_conn_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret, old_mode = connector->dpms;
	bool active = false;

	ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
			       state->acquire_ctx);
	if (ret)
		return ret;

	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;
	connector->dpms = mode;

	crtc = connector->state->crtc;
	if (!crtc)
		goto out;
	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		goto out;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto out;
	}

	for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) {
		if (new_conn_state->crtc != crtc)
			continue;
		if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
			active = true;
			break;
		}
	}

	crtc_state->active = active;
	ret = drm_atomic_commit(state);
out:
	if (ret != 0)
		connector->dpms = old_mode;
	return ret;
}

int drm_atomic_set_property(struct drm_atomic_state *state,
			    struct drm_mode_object *obj,
			    struct drm_property *prop,
			    uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	drm_property_change_valid_put(prop, ref);
	return ret;
}

/**
 * drm_atomic_clean_old_fb - Unset old_fb pointers and set plane->fb pointers.
 *
 * @dev: drm device to check.
 * @plane_mask: plane mask for planes that were updated.
 * @ret: return value, can be -EDEADLK for a retry.
 *
 * Before doing an update, &drm_plane.old_fb is set to &drm_plane.fb; before
 * dropping the locks, old_fb needs to be set to NULL and plane->fb updated.
 * This is a common operation for each atomic update, so this call is split
 * off as a helper.
 */
void drm_atomic_clean_old_fb(struct drm_device *dev,
			     unsigned plane_mask,
			     int ret)
{
	struct drm_plane *plane;

	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
	 * locks (ie. while it is still safe to deref plane->state). We
	 * need to do this here because the driver entry points cannot
	 * distinguish between legacy and atomic ioctls.
	 */
	drm_for_each_plane_mask(plane, dev, plane_mask) {
		if (ret == 0) {
			struct drm_framebuffer *new_fb = plane->state->fb;
			if (new_fb)
				drm_framebuffer_get(new_fb);
			plane->fb = new_fb;
			plane->crtc = plane->state->crtc;

			if (plane->old_fb)
				drm_framebuffer_put(plane->old_fb);
		}
		plane->old_fb = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
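/*
 * Illustrative sketch, assuming legacy-style caller code (mirroring
 * drm_mode_atomic_ioctl() below): old_fb is primed before the update and the
 * helper fixes the pointers up again afterwards, whatever the outcome:
 *
 *	plane->old_fb = plane->fb;
 *	plane_mask |= 1 << drm_plane_index(plane);
 *	...
 *	ret = drm_atomic_commit(state);
 *	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 */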
/**
 * DOC: explicit fencing properties
 *
 * Explicit fencing allows userspace to control the buffer synchronization
 * between devices. A fence or a group of fences are transferred to/from
 * userspace using Sync File fds and there are two DRM properties for that:
 * IN_FENCE_FD on each DRM Plane to send fences to the kernel and
 * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel.
 *
 * As a contrast, with implicit fencing the kernel keeps track of any
 * ongoing rendering, and automatically ensures that the atomic update waits
 * for any pending rendering to complete. For shared buffers represented with
 * a &struct dma_buf this is tracked in &struct reservation_object.
 * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
 * whereas explicit fencing is what Android wants.
 *
 * "IN_FENCE_FD":
 *	Use this property to pass a fence that DRM should wait on before
 *	proceeding with the Atomic Commit request and show the framebuffer for
 *	the plane on the screen. The fence can be either a normal fence or a
 *	merged one, the sync_file framework will handle both cases and use a
 *	fence_array if a merged fence is received. Passing -1 here means no
 *	fences to wait on.
 *
 *	If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag
 *	it will only check if the Sync File is a valid one.
 *
 *	On the driver side the fence is stored on the @fence parameter of
 *	&struct drm_plane_state. Drivers which also support implicit fencing
 *	should set the implicit fence using drm_atomic_set_fence_for_plane(),
 *	to make sure there's consistent behaviour between drivers in precedence
 *	of implicit vs. explicit fencing.
 *
 * "OUT_FENCE_PTR":
 *	Use this property to pass a file descriptor pointer to DRM. Once the
 *	Atomic Commit request call returns OUT_FENCE_PTR will be filled with
 *	the file descriptor number of a Sync File. This Sync File contains the
 *	CRTC fence that will be signaled when all framebuffers present on the
 *	Atomic Commit request for that given CRTC are scanned out on the
 *	screen.
 *
 *	The Atomic Commit request fails if an invalid pointer is passed. If the
 *	Atomic Commit request fails for any other reason the out fence fd
 *	returned will be -1. On an Atomic Commit with the
 *	DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1.
 *
 *	Note that out-fences don't have a special interface to drivers and are
 *	internally represented by a &struct drm_pending_vblank_event in
 *	&struct drm_crtc_state, which is also used by the nonblocking atomic
 *	commit helpers and for the DRM event handling for existing userspace.
 */
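/*
 * Illustrative userspace sketch using libdrm, not part of this file. The
 * object ids (plane_id, crtc_id) and property ids (in_fence_fd_prop,
 * out_fence_ptr_prop) are assumed to have been looked up beforehand, and
 * in_fence_fd is a Sync File fd for the plane's buffer:
 *
 *	int32_t out_fence = -1;
 *	drmModeAtomicReq *req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, in_fence_fd_prop, in_fence_fd);
 *	drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop,
 *				 (uint64_t)(uintptr_t)&out_fence);
 *	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);
 *	drmModeAtomicFree(req);
 *
 * On success out_fence holds the Sync File fd carrying the CRTC's out-fence.
 */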
struct drm_out_fence_state {
	s32 __user *out_fence_ptr;
	struct sync_file *sync_file;
	int fd;
};

static int setup_out_fence(struct drm_out_fence_state *fence_state,
			   struct dma_fence *fence)
{
	fence_state->fd = get_unused_fd_flags(O_CLOEXEC);
	if (fence_state->fd < 0)
		return fence_state->fd;

	if (put_user(fence_state->fd, fence_state->out_fence_ptr))
		return -EFAULT;

	fence_state->sync_file = sync_file_create(fence);
	if (!fence_state->sync_file)
		return -ENOMEM;

	return 0;
}

static int prepare_crtc_signaling(struct drm_device *dev,
				  struct drm_atomic_state *state,
				  struct drm_mode_atomic *arg,
				  struct drm_file *file_priv,
				  struct drm_out_fence_state **fence_state,
				  unsigned int *num_fences)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, c = 0, ret;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		return 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		s32 __user *fence_ptr;

		fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(crtc, arg->user_data);
			if (!e)
				return -ENOMEM;

			crtc_state->event = e;
		}

		if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			struct drm_pending_vblank_event *e = crtc_state->event;

			if (!file_priv)
				continue;

			ret = drm_event_reserve_init(dev, file_priv, &e->base,
						     &e->event.base);
			if (ret) {
				kfree(e);
				crtc_state->event = NULL;
				return ret;
			}
		}

		if (fence_ptr) {
			struct dma_fence *fence;
			struct drm_out_fence_state *f;

			f = krealloc(*fence_state, sizeof(**fence_state) *
				     (*num_fences + 1), GFP_KERNEL);
			if (!f)
				return -ENOMEM;

			memset(&f[*num_fences], 0, sizeof(*f));

			f[*num_fences].out_fence_ptr = fence_ptr;
			*fence_state = f;

			fence = drm_crtc_create_fence(crtc);
			if (!fence)
				return -ENOMEM;

			ret = setup_out_fence(&f[(*num_fences)++], fence);
			if (ret) {
				dma_fence_put(fence);
				return ret;
			}

			crtc_state->event->base.fence = fence;
		}

		c++;
	}

	/*
	 * Having this flag means userspace expects an event that would never
	 * be delivered, since there is no CRTC in the commit to signal it
	 * from.
	 */
	if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	return 0;
}

static void complete_crtc_signaling(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    struct drm_out_fence_state *fence_state,
				    unsigned int num_fences,
				    bool install_fds)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (install_fds) {
		for (i = 0; i < num_fences; i++)
			fd_install(fence_state[i].fd,
				   fence_state[i].sync_file->file);

		kfree(fence_state);
		return;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_pending_vblank_event *event = crtc_state->event;
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		if (event && (event->base.fence || event->base.file_priv)) {
			drm_event_cancel_free(dev, &event->base);
			crtc_state->event = NULL;
		}
	}

	if (!fence_state)
		return;

	for (i = 0; i < num_fences; i++) {
		if (fence_state[i].sync_file)
			fput(fence_state[i].sync_file->file);
		if (fence_state[i].fd >= 0)
			put_unused_fd(fence_state[i].fd);

		/* If this fails, log the error to the user */
		if (fence_state[i].out_fence_ptr &&
		    put_user(-1, fence_state[i].out_fence_ptr))
			DRM_DEBUG_ATOMIC("Couldn't clear out_fence_ptr\n");
	}

	kfree(fence_state);
}

int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_plane *plane;
	struct drm_out_fence_state *fence_state;
	unsigned plane_mask;
	int ret = 0;
	unsigned int i, j, num_fences;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
	    !dev->mode_config.async_page_flip)
		return -EINVAL;
	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
	    (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);

retry:
	plane_mask = 0;
	copied_objs = 0;
	copied_props = 0;
	fence_state = NULL;
	num_fences = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_put(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_put(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_put(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_put(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = drm_atomic_set_property(state, obj, prop,
						      prop_value);
			if (ret) {
				drm_mode_object_put(obj);
				goto out;
			}

			copied_props++;
		}

		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
			plane = obj_to_plane(obj);
			plane_mask |= (1 << drm_plane_index(plane));
			plane->old_fb = plane->fb;
		}
		drm_mode_object_put(obj);
	}

	ret = prepare_crtc_signaling(dev, state, arg, file_priv, &fence_state,
				     &num_fences);
	if (ret)
		goto out;

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		if (unlikely(drm_debug & DRM_UT_STATE))
			drm_atomic_print_state(state);

		ret = drm_atomic_commit(state);
	}

out:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	complete_crtc_signaling(dev, state, fence_state, num_fences, !ret);

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}