1 /* 2 * Copyright (C) 2014 Red Hat 3 * Copyright (C) 2014 Intel Corp. 4 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: 25 * Rob Clark <robdclark@gmail.com> 26 * Daniel Vetter <daniel.vetter@ffwll.ch> 27 */ 28 29 30 #include <linux/sync_file.h> 31 32 #include <drm/drm_atomic.h> 33 #include <drm/drm_atomic_uapi.h> 34 #include <drm/drm_bridge.h> 35 #include <drm/drm_debugfs.h> 36 #include <drm/drm_device.h> 37 #include <drm/drm_drv.h> 38 #include <drm/drm_file.h> 39 #include <drm/drm_fourcc.h> 40 #include <drm/drm_mode.h> 41 #include <drm/drm_print.h> 42 #include <drm/drm_writeback.h> 43 44 #include "drm_crtc_internal.h" 45 #include "drm_internal.h" 46 47 void __drm_crtc_commit_free(struct kref *kref) 48 { 49 struct drm_crtc_commit *commit = 50 container_of(kref, struct drm_crtc_commit, ref); 51 52 kfree(commit); 53 } 54 EXPORT_SYMBOL(__drm_crtc_commit_free); 55 56 /** 57 * drm_crtc_commit_wait - Waits for a commit to complete 58 * @commit: &drm_crtc_commit to wait for 59 * 60 * Waits for a given &drm_crtc_commit to be programmed into the 61 * hardware and flipped to. 62 * 63 * Returns: 64 * 65 * 0 on success, a negative error code otherwise. 66 */ 67 int drm_crtc_commit_wait(struct drm_crtc_commit *commit) 68 { 69 unsigned long timeout = 10 * HZ; 70 int ret; 71 72 if (!commit) 73 return 0; 74 75 ret = wait_for_completion_timeout(&commit->hw_done, timeout); 76 if (!ret) { 77 drm_err(commit->crtc->dev, "hw_done timed out\n"); 78 return -ETIMEDOUT; 79 } 80 81 /* 82 * Currently no support for overwriting flips, hence 83 * stall for previous one to execute completely. 84 */ 85 ret = wait_for_completion_timeout(&commit->flip_done, timeout); 86 if (!ret) { 87 drm_err(commit->crtc->dev, "flip_done timed out\n"); 88 return -ETIMEDOUT; 89 } 90 91 return 0; 92 } 93 EXPORT_SYMBOL(drm_crtc_commit_wait); 94 95 /** 96 * drm_atomic_state_default_release - 97 * release memory initialized by drm_atomic_state_init 98 * @state: atomic state 99 * 100 * Free all the memory allocated by drm_atomic_state_init. 101 * This should only be used by drivers which are still subclassing 102 * &drm_atomic_state and haven't switched to &drm_private_state yet. 
103 */ 104 void drm_atomic_state_default_release(struct drm_atomic_state *state) 105 { 106 kfree(state->connectors); 107 kfree(state->crtcs); 108 kfree(state->planes); 109 kfree(state->private_objs); 110 } 111 EXPORT_SYMBOL(drm_atomic_state_default_release); 112 113 /** 114 * drm_atomic_state_init - init new atomic state 115 * @dev: DRM device 116 * @state: atomic state 117 * 118 * Default implementation for filling in a new atomic state. 119 * This should only be used by drivers which are still subclassing 120 * &drm_atomic_state and haven't switched to &drm_private_state yet. 121 */ 122 int 123 drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) 124 { 125 kref_init(&state->ref); 126 127 /* TODO legacy paths should maybe do a better job about 128 * setting this appropriately? 129 */ 130 state->allow_modeset = true; 131 132 state->crtcs = kcalloc(dev->mode_config.num_crtc, 133 sizeof(*state->crtcs), GFP_KERNEL); 134 if (!state->crtcs) 135 goto fail; 136 state->planes = kcalloc(dev->mode_config.num_total_plane, 137 sizeof(*state->planes), GFP_KERNEL); 138 if (!state->planes) 139 goto fail; 140 141 state->dev = dev; 142 143 drm_dbg_atomic(dev, "Allocated atomic state %p\n", state); 144 145 return 0; 146 fail: 147 drm_atomic_state_default_release(state); 148 return -ENOMEM; 149 } 150 EXPORT_SYMBOL(drm_atomic_state_init); 151 152 /** 153 * drm_atomic_state_alloc - allocate atomic state 154 * @dev: DRM device 155 * 156 * This allocates an empty atomic state to track updates. 157 */ 158 struct drm_atomic_state * 159 drm_atomic_state_alloc(struct drm_device *dev) 160 { 161 struct drm_mode_config *config = &dev->mode_config; 162 163 if (!config->funcs->atomic_state_alloc) { 164 struct drm_atomic_state *state; 165 166 state = kzalloc(sizeof(*state), GFP_KERNEL); 167 if (!state) 168 return NULL; 169 if (drm_atomic_state_init(dev, state) < 0) { 170 kfree(state); 171 return NULL; 172 } 173 return state; 174 } 175 176 return config->funcs->atomic_state_alloc(dev); 177 } 178 EXPORT_SYMBOL(drm_atomic_state_alloc); 179 180 /** 181 * drm_atomic_state_default_clear - clear base atomic state 182 * @state: atomic state 183 * 184 * Default implementation for clearing atomic state. 185 * This should only be used by drivers which are still subclassing 186 * &drm_atomic_state and haven't switched to &drm_private_state yet. 
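 *
 * For reference, a subclassing driver typically wires the three default
 * helpers (init, clear, release) up through &drm_mode_config_funcs roughly as
 * in the following sketch. The foo_* names are hypothetical and only
 * illustrate the pattern::
 *
 *	struct foo_atomic_state {
 *		struct drm_atomic_state base;
 *		bool bandwidth_changed;
 *	};
 *
 *	static struct drm_atomic_state *foo_atomic_state_alloc(struct drm_device *dev)
 *	{
 *		struct foo_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
 *
 *		if (!state)
 *			return NULL;
 *		if (drm_atomic_state_init(dev, &state->base) < 0) {
 *			kfree(state);
 *			return NULL;
 *		}
 *		return &state->base;
 *	}
 *
 *	static void foo_atomic_state_clear(struct drm_atomic_state *s)
 *	{
 *		to_foo_atomic_state(s)->bandwidth_changed = false;
 *		drm_atomic_state_default_clear(s);
 *	}
 *
 *	static void foo_atomic_state_free(struct drm_atomic_state *s)
 *	{
 *		drm_atomic_state_default_release(s);
 *		kfree(to_foo_atomic_state(s));
 *	}
 *
 * where to_foo_atomic_state() is the usual container_of() wrapper.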
187 */ 188 void drm_atomic_state_default_clear(struct drm_atomic_state *state) 189 { 190 struct drm_device *dev = state->dev; 191 struct drm_mode_config *config = &dev->mode_config; 192 int i; 193 194 drm_dbg_atomic(dev, "Clearing atomic state %p\n", state); 195 196 for (i = 0; i < state->num_connector; i++) { 197 struct drm_connector *connector = state->connectors[i].ptr; 198 199 if (!connector) 200 continue; 201 202 connector->funcs->atomic_destroy_state(connector, 203 state->connectors[i].state); 204 state->connectors[i].ptr = NULL; 205 state->connectors[i].state = NULL; 206 state->connectors[i].old_state = NULL; 207 state->connectors[i].new_state = NULL; 208 drm_connector_put(connector); 209 } 210 211 for (i = 0; i < config->num_crtc; i++) { 212 struct drm_crtc *crtc = state->crtcs[i].ptr; 213 214 if (!crtc) 215 continue; 216 217 crtc->funcs->atomic_destroy_state(crtc, 218 state->crtcs[i].state); 219 220 state->crtcs[i].ptr = NULL; 221 state->crtcs[i].state = NULL; 222 state->crtcs[i].old_state = NULL; 223 state->crtcs[i].new_state = NULL; 224 225 if (state->crtcs[i].commit) { 226 drm_crtc_commit_put(state->crtcs[i].commit); 227 state->crtcs[i].commit = NULL; 228 } 229 } 230 231 for (i = 0; i < config->num_total_plane; i++) { 232 struct drm_plane *plane = state->planes[i].ptr; 233 234 if (!plane) 235 continue; 236 237 plane->funcs->atomic_destroy_state(plane, 238 state->planes[i].state); 239 state->planes[i].ptr = NULL; 240 state->planes[i].state = NULL; 241 state->planes[i].old_state = NULL; 242 state->planes[i].new_state = NULL; 243 } 244 245 for (i = 0; i < state->num_private_objs; i++) { 246 struct drm_private_obj *obj = state->private_objs[i].ptr; 247 248 obj->funcs->atomic_destroy_state(obj, 249 state->private_objs[i].state); 250 state->private_objs[i].ptr = NULL; 251 state->private_objs[i].state = NULL; 252 state->private_objs[i].old_state = NULL; 253 state->private_objs[i].new_state = NULL; 254 } 255 state->num_private_objs = 0; 256 257 if (state->fake_commit) { 258 drm_crtc_commit_put(state->fake_commit); 259 state->fake_commit = NULL; 260 } 261 } 262 EXPORT_SYMBOL(drm_atomic_state_default_clear); 263 264 /** 265 * drm_atomic_state_clear - clear state object 266 * @state: atomic state 267 * 268 * When the w/w mutex algorithm detects a deadlock we need to back off and drop 269 * all locks. So someone else could sneak in and change the current modeset 270 * configuration. Which means that all the state assembled in @state is no 271 * longer an atomic update to the current state, but to some arbitrary earlier 272 * state. Which could break assumptions the driver's 273 * &drm_mode_config_funcs.atomic_check likely relies on. 274 * 275 * Hence we must clear all cached state and completely start over, using this 276 * function. 277 */ 278 void drm_atomic_state_clear(struct drm_atomic_state *state) 279 { 280 struct drm_device *dev = state->dev; 281 struct drm_mode_config *config = &dev->mode_config; 282 283 if (config->funcs->atomic_state_clear) 284 config->funcs->atomic_state_clear(state); 285 else 286 drm_atomic_state_default_clear(state); 287 } 288 EXPORT_SYMBOL(drm_atomic_state_clear); 289 290 /** 291 * __drm_atomic_state_free - free all memory for an atomic state 292 * @ref: This atomic state to deallocate 293 * 294 * This frees all memory associated with an atomic state, including all the 295 * per-object state for planes, CRTCs and connectors. 
296 */ 297 void __drm_atomic_state_free(struct kref *ref) 298 { 299 struct drm_atomic_state *state = container_of(ref, typeof(*state), ref); 300 struct drm_mode_config *config = &state->dev->mode_config; 301 302 drm_atomic_state_clear(state); 303 304 drm_dbg_atomic(state->dev, "Freeing atomic state %p\n", state); 305 306 if (config->funcs->atomic_state_free) { 307 config->funcs->atomic_state_free(state); 308 } else { 309 drm_atomic_state_default_release(state); 310 kfree(state); 311 } 312 } 313 EXPORT_SYMBOL(__drm_atomic_state_free); 314 315 /** 316 * drm_atomic_get_crtc_state - get CRTC state 317 * @state: global atomic state object 318 * @crtc: CRTC to get state object for 319 * 320 * This function returns the CRTC state for the given CRTC, allocating it if 321 * needed. It will also grab the relevant CRTC lock to make sure that the state 322 * is consistent. 323 * 324 * WARNING: Drivers may only add new CRTC states to a @state if 325 * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit 326 * not created by userspace through an IOCTL call. 327 * 328 * Returns: 329 * 330 * Either the allocated state or the error code encoded into the pointer. When 331 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 332 * entire atomic sequence must be restarted. All other errors are fatal. 333 */ 334 struct drm_crtc_state * 335 drm_atomic_get_crtc_state(struct drm_atomic_state *state, 336 struct drm_crtc *crtc) 337 { 338 int ret, index = drm_crtc_index(crtc); 339 struct drm_crtc_state *crtc_state; 340 341 WARN_ON(!state->acquire_ctx); 342 343 crtc_state = drm_atomic_get_existing_crtc_state(state, crtc); 344 if (crtc_state) 345 return crtc_state; 346 347 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); 348 if (ret) 349 return ERR_PTR(ret); 350 351 crtc_state = crtc->funcs->atomic_duplicate_state(crtc); 352 if (!crtc_state) 353 return ERR_PTR(-ENOMEM); 354 355 state->crtcs[index].state = crtc_state; 356 state->crtcs[index].old_state = crtc->state; 357 state->crtcs[index].new_state = crtc_state; 358 state->crtcs[index].ptr = crtc; 359 crtc_state->state = state; 360 361 drm_dbg_atomic(state->dev, "Added [CRTC:%d:%s] %p state to %p\n", 362 crtc->base.id, crtc->name, crtc_state, state); 363 364 return crtc_state; 365 } 366 EXPORT_SYMBOL(drm_atomic_get_crtc_state); 367 368 static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state, 369 const struct drm_crtc_state *new_crtc_state) 370 { 371 struct drm_crtc *crtc = new_crtc_state->crtc; 372 373 /* NOTE: we explicitly don't enforce constraints such as primary 374 * layer covering entire screen, since that is something we want 375 * to allow (on hw that supports it). For hw that does not, it 376 * should be checked in driver's crtc->atomic_check() vfunc. 377 * 378 * TODO: Add generic modeset state checks once we support those. 379 */ 380 381 if (new_crtc_state->active && !new_crtc_state->enable) { 382 drm_dbg_atomic(crtc->dev, 383 "[CRTC:%d:%s] active without enabled\n", 384 crtc->base.id, crtc->name); 385 return -EINVAL; 386 } 387 388 /* The state->enable vs. state->mode_blob checks can be WARN_ON, 389 * as this is a kernel-internal detail that userspace should never 390 * be able to trigger. 
391 */ 392 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 393 WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) { 394 drm_dbg_atomic(crtc->dev, 395 "[CRTC:%d:%s] enabled without mode blob\n", 396 crtc->base.id, crtc->name); 397 return -EINVAL; 398 } 399 400 if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) && 401 WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) { 402 drm_dbg_atomic(crtc->dev, 403 "[CRTC:%d:%s] disabled with mode blob\n", 404 crtc->base.id, crtc->name); 405 return -EINVAL; 406 } 407 408 /* 409 * Reject event generation for when a CRTC is off and stays off. 410 * It wouldn't be hard to implement this, but userspace has a track 411 * record of happily burning through 100% cpu (or worse, crash) when the 412 * display pipe is suspended. To avoid all that fun just reject updates 413 * that ask for events since likely that indicates a bug in the 414 * compositor's drawing loop. This is consistent with the vblank IOCTL 415 * and legacy page_flip IOCTL which also reject service on a disabled 416 * pipe. 417 */ 418 if (new_crtc_state->event && 419 !new_crtc_state->active && !old_crtc_state->active) { 420 drm_dbg_atomic(crtc->dev, 421 "[CRTC:%d:%s] requesting event but off\n", 422 crtc->base.id, crtc->name); 423 return -EINVAL; 424 } 425 426 return 0; 427 } 428 429 static void drm_atomic_crtc_print_state(struct drm_printer *p, 430 const struct drm_crtc_state *state) 431 { 432 struct drm_crtc *crtc = state->crtc; 433 434 drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name); 435 drm_printf(p, "\tenable=%d\n", state->enable); 436 drm_printf(p, "\tactive=%d\n", state->active); 437 drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active); 438 drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed); 439 drm_printf(p, "\tmode_changed=%d\n", state->mode_changed); 440 drm_printf(p, "\tactive_changed=%d\n", state->active_changed); 441 drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed); 442 drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed); 443 drm_printf(p, "\tplane_mask=%x\n", state->plane_mask); 444 drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask); 445 drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask); 446 drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode)); 447 448 if (crtc->funcs->atomic_print_state) 449 crtc->funcs->atomic_print_state(p, state); 450 } 451 452 static int drm_atomic_connector_check(struct drm_connector *connector, 453 struct drm_connector_state *state) 454 { 455 struct drm_crtc_state *crtc_state; 456 struct drm_writeback_job *writeback_job = state->writeback_job; 457 const struct drm_display_info *info = &connector->display_info; 458 459 state->max_bpc = info->bpc ? 
info->bpc : 8; 460 if (connector->max_bpc_property) 461 state->max_bpc = min(state->max_bpc, state->max_requested_bpc); 462 463 if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job) 464 return 0; 465 466 if (writeback_job->fb && !state->crtc) { 467 drm_dbg_atomic(connector->dev, 468 "[CONNECTOR:%d:%s] framebuffer without CRTC\n", 469 connector->base.id, connector->name); 470 return -EINVAL; 471 } 472 473 if (state->crtc) 474 crtc_state = drm_atomic_get_existing_crtc_state(state->state, 475 state->crtc); 476 477 if (writeback_job->fb && !crtc_state->active) { 478 drm_dbg_atomic(connector->dev, 479 "[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n", 480 connector->base.id, connector->name, 481 state->crtc->base.id); 482 return -EINVAL; 483 } 484 485 if (!writeback_job->fb) { 486 if (writeback_job->out_fence) { 487 drm_dbg_atomic(connector->dev, 488 "[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", 489 connector->base.id, connector->name); 490 return -EINVAL; 491 } 492 493 drm_writeback_cleanup_job(writeback_job); 494 state->writeback_job = NULL; 495 } 496 497 return 0; 498 } 499 500 /** 501 * drm_atomic_get_plane_state - get plane state 502 * @state: global atomic state object 503 * @plane: plane to get state object for 504 * 505 * This function returns the plane state for the given plane, allocating it if 506 * needed. It will also grab the relevant plane lock to make sure that the state 507 * is consistent. 508 * 509 * Returns: 510 * 511 * Either the allocated state or the error code encoded into the pointer. When 512 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 513 * entire atomic sequence must be restarted. All other errors are fatal. 514 */ 515 struct drm_plane_state * 516 drm_atomic_get_plane_state(struct drm_atomic_state *state, 517 struct drm_plane *plane) 518 { 519 int ret, index = drm_plane_index(plane); 520 struct drm_plane_state *plane_state; 521 522 WARN_ON(!state->acquire_ctx); 523 524 /* the legacy pointers should never be set */ 525 WARN_ON(plane->fb); 526 WARN_ON(plane->old_fb); 527 WARN_ON(plane->crtc); 528 529 plane_state = drm_atomic_get_existing_plane_state(state, plane); 530 if (plane_state) 531 return plane_state; 532 533 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); 534 if (ret) 535 return ERR_PTR(ret); 536 537 plane_state = plane->funcs->atomic_duplicate_state(plane); 538 if (!plane_state) 539 return ERR_PTR(-ENOMEM); 540 541 state->planes[index].state = plane_state; 542 state->planes[index].ptr = plane; 543 state->planes[index].old_state = plane->state; 544 state->planes[index].new_state = plane_state; 545 plane_state->state = state; 546 547 drm_dbg_atomic(plane->dev, "Added [PLANE:%d:%s] %p state to %p\n", 548 plane->base.id, plane->name, plane_state, state); 549 550 if (plane_state->crtc) { 551 struct drm_crtc_state *crtc_state; 552 553 crtc_state = drm_atomic_get_crtc_state(state, 554 plane_state->crtc); 555 if (IS_ERR(crtc_state)) 556 return ERR_CAST(crtc_state); 557 } 558 559 return plane_state; 560 } 561 EXPORT_SYMBOL(drm_atomic_get_plane_state); 562 563 static bool 564 plane_switching_crtc(const struct drm_plane_state *old_plane_state, 565 const struct drm_plane_state *new_plane_state) 566 { 567 if (!old_plane_state->crtc || !new_plane_state->crtc) 568 return false; 569 570 if (old_plane_state->crtc == new_plane_state->crtc) 571 return false; 572 573 /* This could be refined, but currently there's no helper or driver code 574 * to implement direct switching of 
active planes nor userspace to take 575 * advantage of more direct plane switching without the intermediate 576 * full OFF state. 577 */ 578 return true; 579 } 580 581 /** 582 * drm_atomic_plane_check - check plane state 583 * @old_plane_state: old plane state to check 584 * @new_plane_state: new plane state to check 585 * 586 * Provides core sanity checks for plane state. 587 * 588 * RETURNS: 589 * Zero on success, error code on failure 590 */ 591 static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state, 592 const struct drm_plane_state *new_plane_state) 593 { 594 struct drm_plane *plane = new_plane_state->plane; 595 struct drm_crtc *crtc = new_plane_state->crtc; 596 const struct drm_framebuffer *fb = new_plane_state->fb; 597 unsigned int fb_width, fb_height; 598 struct drm_mode_rect *clips; 599 uint32_t num_clips; 600 int ret; 601 602 /* either *both* CRTC and FB must be set, or neither */ 603 if (crtc && !fb) { 604 drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] CRTC set but no FB\n", 605 plane->base.id, plane->name); 606 return -EINVAL; 607 } else if (fb && !crtc) { 608 drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] FB set but no CRTC\n", 609 plane->base.id, plane->name); 610 return -EINVAL; 611 } 612 613 /* if disabled, we don't care about the rest of the state: */ 614 if (!crtc) 615 return 0; 616 617 /* Check whether this plane is usable on this CRTC */ 618 if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { 619 drm_dbg_atomic(plane->dev, 620 "Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n", 621 crtc->base.id, crtc->name, 622 plane->base.id, plane->name); 623 return -EINVAL; 624 } 625 626 /* Check whether this plane supports the fb pixel format. */ 627 ret = drm_plane_check_pixel_format(plane, fb->format->format, 628 fb->modifier); 629 if (ret) { 630 drm_dbg_atomic(plane->dev, 631 "[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n", 632 plane->base.id, plane->name, 633 &fb->format->format, fb->modifier); 634 return ret; 635 } 636 637 /* Give drivers some help against integer overflows */ 638 if (new_plane_state->crtc_w > INT_MAX || 639 new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w || 640 new_plane_state->crtc_h > INT_MAX || 641 new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) { 642 drm_dbg_atomic(plane->dev, 643 "[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n", 644 plane->base.id, plane->name, 645 new_plane_state->crtc_w, new_plane_state->crtc_h, 646 new_plane_state->crtc_x, new_plane_state->crtc_y); 647 return -ERANGE; 648 } 649 650 fb_width = fb->width << 16; 651 fb_height = fb->height << 16; 652 653 /* Make sure source coordinates are inside the fb. 
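	 *
	 * Note that src_x/y/w/h are 16.16 fixed point (the whole-pixel value
	 * shifted left by 16), which is why fb_width/fb_height are shifted up
	 * above. E.g. a 1920.5 pixel wide source is (1920 << 16) | 0x8000. The
	 * debug print below recovers the fractional part in micro-pixels via
	 * (frac * 15625) >> 10, which equals frac * 1000000 / 65536.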
*/ 654 if (new_plane_state->src_w > fb_width || 655 new_plane_state->src_x > fb_width - new_plane_state->src_w || 656 new_plane_state->src_h > fb_height || 657 new_plane_state->src_y > fb_height - new_plane_state->src_h) { 658 drm_dbg_atomic(plane->dev, 659 "[PLANE:%d:%s] invalid source coordinates " 660 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n", 661 plane->base.id, plane->name, 662 new_plane_state->src_w >> 16, 663 ((new_plane_state->src_w & 0xffff) * 15625) >> 10, 664 new_plane_state->src_h >> 16, 665 ((new_plane_state->src_h & 0xffff) * 15625) >> 10, 666 new_plane_state->src_x >> 16, 667 ((new_plane_state->src_x & 0xffff) * 15625) >> 10, 668 new_plane_state->src_y >> 16, 669 ((new_plane_state->src_y & 0xffff) * 15625) >> 10, 670 fb->width, fb->height); 671 return -ENOSPC; 672 } 673 674 clips = __drm_plane_get_damage_clips(new_plane_state); 675 num_clips = drm_plane_get_damage_clips_count(new_plane_state); 676 677 /* Make sure damage clips are valid and inside the fb. */ 678 while (num_clips > 0) { 679 if (clips->x1 >= clips->x2 || 680 clips->y1 >= clips->y2 || 681 clips->x1 < 0 || 682 clips->y1 < 0 || 683 clips->x2 > fb_width || 684 clips->y2 > fb_height) { 685 drm_dbg_atomic(plane->dev, 686 "[PLANE:%d:%s] invalid damage clip %d %d %d %d\n", 687 plane->base.id, plane->name, clips->x1, 688 clips->y1, clips->x2, clips->y2); 689 return -EINVAL; 690 } 691 clips++; 692 num_clips--; 693 } 694 695 if (plane_switching_crtc(old_plane_state, new_plane_state)) { 696 drm_dbg_atomic(plane->dev, 697 "[PLANE:%d:%s] switching CRTC directly\n", 698 plane->base.id, plane->name); 699 return -EINVAL; 700 } 701 702 return 0; 703 } 704 705 static void drm_atomic_plane_print_state(struct drm_printer *p, 706 const struct drm_plane_state *state) 707 { 708 struct drm_plane *plane = state->plane; 709 struct drm_rect src = drm_plane_state_src(state); 710 struct drm_rect dest = drm_plane_state_dest(state); 711 712 drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name); 713 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 714 drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0); 715 if (state->fb) 716 drm_framebuffer_print_info(p, 2, state->fb); 717 drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest)); 718 drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src)); 719 drm_printf(p, "\trotation=%x\n", state->rotation); 720 drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos); 721 drm_printf(p, "\tcolor-encoding=%s\n", 722 drm_get_color_encoding_name(state->color_encoding)); 723 drm_printf(p, "\tcolor-range=%s\n", 724 drm_get_color_range_name(state->color_range)); 725 726 if (plane->funcs->atomic_print_state) 727 plane->funcs->atomic_print_state(p, state); 728 } 729 730 /** 731 * DOC: handling driver private state 732 * 733 * Very often the DRM objects exposed to userspace in the atomic modeset api 734 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the 735 * underlying hardware. Especially for any kind of shared resources (e.g. shared 736 * clocks, scaler units, bandwidth and fifo limits shared among a group of 737 * planes or CRTCs, and so on) it makes sense to model these as independent 738 * objects. Drivers then need to do similar state tracking and commit ordering for 739 * such private (since not exposed to userspace) objects as the atomic core and 740 * helpers already provide for connectors, planes and CRTCs. 
 *
 * To make this easier on drivers the atomic core provides some support to track
 * driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
 * of locking, hence drivers should not have a need to call drm_modeset_lock()
 * directly. Sequence of the actual hardware state commit is not handled,
 * drivers might need to keep track of struct drm_crtc_commit within subclassed
 * structure of &drm_private_state as necessary, e.g. similar to
 * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
 * Drivers are recommended to wrap these for each type of driver private state
 * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
 * least if they want to iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback or
 * commit instead" of "duplicate state, check, then either commit or release
 * duplicated state") it is deprecated in favour of using &drm_private_state.
 */

/**
 * drm_atomic_private_obj_init - initialize private object
 * @dev: DRM device this object will be attached to
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_device *dev,
			    struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	drm_modeset_lock_init(&obj->lock);

	obj->state = state;
	obj->funcs = funcs;
	list_add_tail(&obj->head, &dev->mode_config.privobj_list);
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	list_del(&obj->head);
	obj->funcs->atomic_destroy_state(obj, obj->state);
	drm_modeset_lock_fini(&obj->lock);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It will also grab the relevant private
 * object lock to make sure that the state is consistent.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
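 *
 * A minimal usage sketch, with all foo_* names hypothetical and the
 * &drm_private_state_funcs boilerplate (duplicate/destroy) omitted::
 *
 *	struct foo_shared_state {
 *		struct drm_private_state base;
 *		unsigned int used_blocks;
 *	};
 *
 *	#define to_foo_shared_state(x) \
 *		container_of(x, struct foo_shared_state, base)
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct foo_device *foo = to_foo_device(crtc->dev);
 *		struct drm_private_state *priv;
 *		struct foo_shared_state *shared;
 *
 *		priv = drm_atomic_get_private_obj_state(state, &foo->shared_obj);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *
 *		shared = to_foo_shared_state(priv);
 *		shared->used_blocks++;
 *		return 0;
 *	}
 *
 * The object itself is registered once at init time with
 * drm_atomic_private_obj_init(), passing a &drm_private_state_funcs that
 * duplicates and destroys struct foo_shared_state.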
822 */ 823 struct drm_private_state * 824 drm_atomic_get_private_obj_state(struct drm_atomic_state *state, 825 struct drm_private_obj *obj) 826 { 827 int index, num_objs, i, ret; 828 size_t size; 829 struct __drm_private_objs_state *arr; 830 struct drm_private_state *obj_state; 831 832 for (i = 0; i < state->num_private_objs; i++) 833 if (obj == state->private_objs[i].ptr) 834 return state->private_objs[i].state; 835 836 ret = drm_modeset_lock(&obj->lock, state->acquire_ctx); 837 if (ret) 838 return ERR_PTR(ret); 839 840 num_objs = state->num_private_objs + 1; 841 size = sizeof(*state->private_objs) * num_objs; 842 arr = krealloc(state->private_objs, size, GFP_KERNEL); 843 if (!arr) 844 return ERR_PTR(-ENOMEM); 845 846 state->private_objs = arr; 847 index = state->num_private_objs; 848 memset(&state->private_objs[index], 0, sizeof(*state->private_objs)); 849 850 obj_state = obj->funcs->atomic_duplicate_state(obj); 851 if (!obj_state) 852 return ERR_PTR(-ENOMEM); 853 854 state->private_objs[index].state = obj_state; 855 state->private_objs[index].old_state = obj->state; 856 state->private_objs[index].new_state = obj_state; 857 state->private_objs[index].ptr = obj; 858 obj_state->state = state; 859 860 state->num_private_objs = num_objs; 861 862 drm_dbg_atomic(state->dev, 863 "Added new private object %p state %p to %p\n", 864 obj, obj_state, state); 865 866 return obj_state; 867 } 868 EXPORT_SYMBOL(drm_atomic_get_private_obj_state); 869 870 /** 871 * drm_atomic_get_old_private_obj_state 872 * @state: global atomic state object 873 * @obj: private_obj to grab 874 * 875 * This function returns the old private object state for the given private_obj, 876 * or NULL if the private_obj is not part of the global atomic state. 877 */ 878 struct drm_private_state * 879 drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state, 880 struct drm_private_obj *obj) 881 { 882 int i; 883 884 for (i = 0; i < state->num_private_objs; i++) 885 if (obj == state->private_objs[i].ptr) 886 return state->private_objs[i].old_state; 887 888 return NULL; 889 } 890 EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state); 891 892 /** 893 * drm_atomic_get_new_private_obj_state 894 * @state: global atomic state object 895 * @obj: private_obj to grab 896 * 897 * This function returns the new private object state for the given private_obj, 898 * or NULL if the private_obj is not part of the global atomic state. 899 */ 900 struct drm_private_state * 901 drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state, 902 struct drm_private_obj *obj) 903 { 904 int i; 905 906 for (i = 0; i < state->num_private_objs; i++) 907 if (obj == state->private_objs[i].ptr) 908 return state->private_objs[i].new_state; 909 910 return NULL; 911 } 912 EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state); 913 914 /** 915 * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder 916 * @state: Atomic state 917 * @encoder: The encoder to fetch the connector state for 918 * 919 * This function finds and returns the connector that was connected to @encoder 920 * as specified by the @state. 921 * 922 * If there is no connector in @state which previously had @encoder connected to 923 * it, this function will return NULL. While this may seem like an invalid use 924 * case, it is sometimes useful to differentiate commits which had no prior 925 * connectors attached to @encoder vs ones that did (and to inspect their 926 * state). This is especially true in enable hooks because the pipeline has 927 * changed. 
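 *
 * A short sketch of typical use from an encoder hook, where the foo_* names
 * are hypothetical::
 *
 *	static void foo_encoder_atomic_disable(struct drm_encoder *encoder,
 *					       struct drm_atomic_state *state)
 *	{
 *		struct drm_connector *connector =
 *			drm_atomic_get_old_connector_for_encoder(state, encoder);
 *
 *		if (connector)
 *			foo_tear_down_link(encoder, connector);
 *	}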
928 * 929 * Returns: The old connector connected to @encoder, or NULL if the encoder is 930 * not connected. 931 */ 932 struct drm_connector * 933 drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state, 934 struct drm_encoder *encoder) 935 { 936 struct drm_connector_state *conn_state; 937 struct drm_connector *connector; 938 unsigned int i; 939 940 for_each_old_connector_in_state(state, connector, conn_state, i) { 941 if (conn_state->best_encoder == encoder) 942 return connector; 943 } 944 945 return NULL; 946 } 947 EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder); 948 949 /** 950 * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder 951 * @state: Atomic state 952 * @encoder: The encoder to fetch the connector state for 953 * 954 * This function finds and returns the connector that will be connected to 955 * @encoder as specified by the @state. 956 * 957 * If there is no connector in @state which will have @encoder connected to it, 958 * this function will return NULL. While this may seem like an invalid use case, 959 * it is sometimes useful to differentiate commits which have no connectors 960 * attached to @encoder vs ones that do (and to inspect their state). This is 961 * especially true in disable hooks because the pipeline will change. 962 * 963 * Returns: The new connector connected to @encoder, or NULL if the encoder is 964 * not connected. 965 */ 966 struct drm_connector * 967 drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state, 968 struct drm_encoder *encoder) 969 { 970 struct drm_connector_state *conn_state; 971 struct drm_connector *connector; 972 unsigned int i; 973 974 for_each_new_connector_in_state(state, connector, conn_state, i) { 975 if (conn_state->best_encoder == encoder) 976 return connector; 977 } 978 979 return NULL; 980 } 981 EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder); 982 983 /** 984 * drm_atomic_get_connector_state - get connector state 985 * @state: global atomic state object 986 * @connector: connector to get state object for 987 * 988 * This function returns the connector state for the given connector, 989 * allocating it if needed. It will also grab the relevant connector lock to 990 * make sure that the state is consistent. 991 * 992 * Returns: 993 * 994 * Either the allocated state or the error code encoded into the pointer. When 995 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 996 * entire atomic sequence must be restarted. All other errors are fatal. 
997 */ 998 struct drm_connector_state * 999 drm_atomic_get_connector_state(struct drm_atomic_state *state, 1000 struct drm_connector *connector) 1001 { 1002 int ret, index; 1003 struct drm_mode_config *config = &connector->dev->mode_config; 1004 struct drm_connector_state *connector_state; 1005 1006 WARN_ON(!state->acquire_ctx); 1007 1008 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); 1009 if (ret) 1010 return ERR_PTR(ret); 1011 1012 index = drm_connector_index(connector); 1013 1014 if (index >= state->num_connector) { 1015 struct __drm_connnectors_state *c; 1016 int alloc = max(index + 1, config->num_connector); 1017 1018 c = krealloc_array(state->connectors, alloc, 1019 sizeof(*state->connectors), GFP_KERNEL); 1020 if (!c) 1021 return ERR_PTR(-ENOMEM); 1022 1023 state->connectors = c; 1024 memset(&state->connectors[state->num_connector], 0, 1025 sizeof(*state->connectors) * (alloc - state->num_connector)); 1026 1027 state->num_connector = alloc; 1028 } 1029 1030 if (state->connectors[index].state) 1031 return state->connectors[index].state; 1032 1033 connector_state = connector->funcs->atomic_duplicate_state(connector); 1034 if (!connector_state) 1035 return ERR_PTR(-ENOMEM); 1036 1037 drm_connector_get(connector); 1038 state->connectors[index].state = connector_state; 1039 state->connectors[index].old_state = connector->state; 1040 state->connectors[index].new_state = connector_state; 1041 state->connectors[index].ptr = connector; 1042 connector_state->state = state; 1043 1044 drm_dbg_atomic(connector->dev, "Added [CONNECTOR:%d:%s] %p state to %p\n", 1045 connector->base.id, connector->name, 1046 connector_state, state); 1047 1048 if (connector_state->crtc) { 1049 struct drm_crtc_state *crtc_state; 1050 1051 crtc_state = drm_atomic_get_crtc_state(state, 1052 connector_state->crtc); 1053 if (IS_ERR(crtc_state)) 1054 return ERR_CAST(crtc_state); 1055 } 1056 1057 return connector_state; 1058 } 1059 EXPORT_SYMBOL(drm_atomic_get_connector_state); 1060 1061 static void drm_atomic_connector_print_state(struct drm_printer *p, 1062 const struct drm_connector_state *state) 1063 { 1064 struct drm_connector *connector = state->connector; 1065 1066 drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); 1067 drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)"); 1068 drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); 1069 1070 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 1071 if (state->writeback_job && state->writeback_job->fb) 1072 drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id); 1073 1074 if (connector->funcs->atomic_print_state) 1075 connector->funcs->atomic_print_state(p, state); 1076 } 1077 1078 /** 1079 * drm_atomic_get_bridge_state - get bridge state 1080 * @state: global atomic state object 1081 * @bridge: bridge to get state object for 1082 * 1083 * This function returns the bridge state for the given bridge, allocating it 1084 * if needed. It will also grab the relevant bridge lock to make sure that the 1085 * state is consistent. 1086 * 1087 * Returns: 1088 * 1089 * Either the allocated state or the error code encoded into the pointer. When 1090 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the 1091 * entire atomic sequence must be restarted. 
 */
struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state *state,
			    struct drm_bridge *bridge)
{
	struct drm_private_state *obj_state;

	obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
	if (IS_ERR(obj_state))
		return ERR_CAST(obj_state);

	return drm_priv_to_bridge_state(obj_state);
}
EXPORT_SYMBOL(drm_atomic_get_bridge_state);

/**
 * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
 * @state: global atomic state object
 * @bridge: bridge to grab
 *
 * This function returns the old bridge state for the given bridge, or NULL if
 * the bridge is not part of the global atomic state.
 */
struct drm_bridge_state *
drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
				struct drm_bridge *bridge)
{
	struct drm_private_state *obj_state;

	obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
	if (!obj_state)
		return NULL;

	return drm_priv_to_bridge_state(obj_state);
}
EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);

/**
 * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
 * @state: global atomic state object
 * @bridge: bridge to grab
 *
 * This function returns the new bridge state for the given bridge, or NULL if
 * the bridge is not part of the global atomic state.
 */
struct drm_bridge_state *
drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
				struct drm_bridge *bridge)
{
	struct drm_private_state *obj_state;

	obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
	if (!obj_state)
		return NULL;

	return drm_priv_to_bridge_state(obj_state);
}
EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);

/**
 * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
 * @state: atomic state
 * @encoder: DRM encoder
 *
 * This function adds all bridges attached to @encoder. This is needed to add
 * bridge states to @state and make them available when
 * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
 * &drm_bridge_funcs.atomic_enable(), &drm_bridge_funcs.atomic_disable() and
 * &drm_bridge_funcs.atomic_post_disable() are called.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
			       struct drm_encoder *encoder)
{
	struct drm_bridge_state *bridge_state;
	struct drm_bridge *bridge;

	if (!encoder)
		return 0;

	drm_dbg_atomic(encoder->dev,
		       "Adding all bridges for [encoder:%d:%s] to %p\n",
		       encoder->base.id, encoder->name, state);

	drm_for_each_bridge_in_chain(encoder, bridge) {
		/* Skip bridges that don't implement the atomic state hooks. */
		if (!bridge->funcs->atomic_duplicate_state)
			continue;

		bridge_state = drm_atomic_get_bridge_state(state, bridge);
		if (IS_ERR(bridge_state))
			return PTR_ERR(bridge_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);

/**
 * drm_atomic_add_affected_connectors - add connectors for CRTC
 * @state: atomic state
 * @crtc: DRM CRTC
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one CRTC.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	drm_dbg_atomic(crtc->dev,
		       "Adding all current connectors for [CRTC:%d:%s] to %p\n",
		       crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for CRTC
 * @state: atomic state
 * @crtc: DRM CRTC
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any), adding all the plane states
 * for a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
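 *
 * A typical call site is a CRTC check hook that must revalidate every plane
 * when a full modeset is needed (sketch only, the foo_* name is hypothetical)::
 *
 *	static int foo_crtc_atomic_check(struct drm_crtc *crtc,
 *					 struct drm_atomic_state *state)
 *	{
 *		struct drm_crtc_state *crtc_state =
 *			drm_atomic_get_new_crtc_state(state, crtc);
 *
 *		if (drm_atomic_crtc_needs_modeset(crtc_state))
 *			return drm_atomic_add_affected_planes(state, crtc);
 *
 *		return 0;
 *	}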
1275 */ 1276 int 1277 drm_atomic_add_affected_planes(struct drm_atomic_state *state, 1278 struct drm_crtc *crtc) 1279 { 1280 const struct drm_crtc_state *old_crtc_state = 1281 drm_atomic_get_old_crtc_state(state, crtc); 1282 struct drm_plane *plane; 1283 1284 WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); 1285 1286 drm_dbg_atomic(crtc->dev, 1287 "Adding all current planes for [CRTC:%d:%s] to %p\n", 1288 crtc->base.id, crtc->name, state); 1289 1290 drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) { 1291 struct drm_plane_state *plane_state = 1292 drm_atomic_get_plane_state(state, plane); 1293 1294 if (IS_ERR(plane_state)) 1295 return PTR_ERR(plane_state); 1296 } 1297 return 0; 1298 } 1299 EXPORT_SYMBOL(drm_atomic_add_affected_planes); 1300 1301 /** 1302 * drm_atomic_check_only - check whether a given config would work 1303 * @state: atomic configuration to check 1304 * 1305 * Note that this function can return -EDEADLK if the driver needed to acquire 1306 * more locks but encountered a deadlock. The caller must then do the usual w/w 1307 * backoff dance and restart. All other errors are fatal. 1308 * 1309 * Returns: 1310 * 0 on success, negative error code on failure. 1311 */ 1312 int drm_atomic_check_only(struct drm_atomic_state *state) 1313 { 1314 struct drm_device *dev = state->dev; 1315 struct drm_mode_config *config = &dev->mode_config; 1316 struct drm_plane *plane; 1317 struct drm_plane_state *old_plane_state; 1318 struct drm_plane_state *new_plane_state; 1319 struct drm_crtc *crtc; 1320 struct drm_crtc_state *old_crtc_state; 1321 struct drm_crtc_state *new_crtc_state; 1322 struct drm_connector *conn; 1323 struct drm_connector_state *conn_state; 1324 unsigned int requested_crtc = 0; 1325 unsigned int affected_crtc = 0; 1326 int i, ret = 0; 1327 1328 drm_dbg_atomic(dev, "checking %p\n", state); 1329 1330 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) 1331 requested_crtc |= drm_crtc_mask(crtc); 1332 1333 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 1334 ret = drm_atomic_plane_check(old_plane_state, new_plane_state); 1335 if (ret) { 1336 drm_dbg_atomic(dev, "[PLANE:%d:%s] atomic core check failed\n", 1337 plane->base.id, plane->name); 1338 return ret; 1339 } 1340 } 1341 1342 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 1343 ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state); 1344 if (ret) { 1345 drm_dbg_atomic(dev, "[CRTC:%d:%s] atomic core check failed\n", 1346 crtc->base.id, crtc->name); 1347 return ret; 1348 } 1349 } 1350 1351 for_each_new_connector_in_state(state, conn, conn_state, i) { 1352 ret = drm_atomic_connector_check(conn, conn_state); 1353 if (ret) { 1354 drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] atomic core check failed\n", 1355 conn->base.id, conn->name); 1356 return ret; 1357 } 1358 } 1359 1360 if (config->funcs->atomic_check) { 1361 ret = config->funcs->atomic_check(state->dev, state); 1362 1363 if (ret) { 1364 drm_dbg_atomic(dev, "atomic driver check for %p failed: %d\n", 1365 state, ret); 1366 return ret; 1367 } 1368 } 1369 1370 if (!state->allow_modeset) { 1371 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 1372 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { 1373 drm_dbg_atomic(dev, "[CRTC:%d:%s] requires full modeset\n", 1374 crtc->base.id, crtc->name); 1375 return -EINVAL; 1376 } 1377 } 1378 } 1379 1380 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) 1381 affected_crtc |= drm_crtc_mask(crtc); 1382 1383 /* 1384 * For 
commits that allow modesets, drivers can add other CRTCs to the
	 * atomic commit, e.g. when they need to reallocate global resources.
	 * This can cause spurious EBUSY, which robs compositors of a very
	 * effective sanity check for their drawing loop. Therefore only allow
	 * drivers to add unrelated CRTC states for modeset commits.
	 *
	 * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
	 * so compositors know what's going on.
	 */
	if (affected_crtc != requested_crtc) {
		drm_dbg_atomic(dev,
			       "driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
			       requested_crtc, affected_crtc);
		WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
		     requested_crtc, affected_crtc);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	drm_dbg_atomic(state->dev, "committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
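 *
 * The "backoff dance" for a driver-internal commit usually looks roughly like
 * the sketch below; foo_build_update() is a hypothetical stand-in for whatever
 * adds planes, CRTCs and connectors to the state::
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		goto out;
 *	state->acquire_ctx = &ctx;
 *
 *	retry:
 *	ret = foo_build_update(state);
 *	if (!ret)
 *		ret = drm_atomic_nonblocking_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *
 *	drm_atomic_state_put(state);
 *	out:
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);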
1447 */ 1448 int drm_atomic_nonblocking_commit(struct drm_atomic_state *state) 1449 { 1450 struct drm_mode_config *config = &state->dev->mode_config; 1451 int ret; 1452 1453 ret = drm_atomic_check_only(state); 1454 if (ret) 1455 return ret; 1456 1457 drm_dbg_atomic(state->dev, "committing %p nonblocking\n", state); 1458 1459 return config->funcs->atomic_commit(state->dev, state, true); 1460 } 1461 EXPORT_SYMBOL(drm_atomic_nonblocking_commit); 1462 1463 /* just used from drm-client and atomic-helper: */ 1464 int __drm_atomic_helper_disable_plane(struct drm_plane *plane, 1465 struct drm_plane_state *plane_state) 1466 { 1467 int ret; 1468 1469 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); 1470 if (ret != 0) 1471 return ret; 1472 1473 drm_atomic_set_fb_for_plane(plane_state, NULL); 1474 plane_state->crtc_x = 0; 1475 plane_state->crtc_y = 0; 1476 plane_state->crtc_w = 0; 1477 plane_state->crtc_h = 0; 1478 plane_state->src_x = 0; 1479 plane_state->src_y = 0; 1480 plane_state->src_w = 0; 1481 plane_state->src_h = 0; 1482 1483 return 0; 1484 } 1485 EXPORT_SYMBOL(__drm_atomic_helper_disable_plane); 1486 1487 static int update_output_state(struct drm_atomic_state *state, 1488 struct drm_mode_set *set) 1489 { 1490 struct drm_device *dev = set->crtc->dev; 1491 struct drm_crtc *crtc; 1492 struct drm_crtc_state *new_crtc_state; 1493 struct drm_connector *connector; 1494 struct drm_connector_state *new_conn_state; 1495 int ret, i; 1496 1497 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 1498 state->acquire_ctx); 1499 if (ret) 1500 return ret; 1501 1502 /* First disable all connectors on the target crtc. */ 1503 ret = drm_atomic_add_affected_connectors(state, set->crtc); 1504 if (ret) 1505 return ret; 1506 1507 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 1508 if (new_conn_state->crtc == set->crtc) { 1509 ret = drm_atomic_set_crtc_for_connector(new_conn_state, 1510 NULL); 1511 if (ret) 1512 return ret; 1513 1514 /* Make sure legacy setCrtc always re-trains */ 1515 new_conn_state->link_status = DRM_LINK_STATUS_GOOD; 1516 } 1517 } 1518 1519 /* Then set all connectors from set->connectors on the target crtc */ 1520 for (i = 0; i < set->num_connectors; i++) { 1521 new_conn_state = drm_atomic_get_connector_state(state, 1522 set->connectors[i]); 1523 if (IS_ERR(new_conn_state)) 1524 return PTR_ERR(new_conn_state); 1525 1526 ret = drm_atomic_set_crtc_for_connector(new_conn_state, 1527 set->crtc); 1528 if (ret) 1529 return ret; 1530 } 1531 1532 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 1533 /* 1534 * Don't update ->enable for the CRTC in the set_config request, 1535 * since a mismatch would indicate a bug in the upper layers. 1536 * The actual modeset code later on will catch any 1537 * inconsistencies here. 
1538 */ 1539 if (crtc == set->crtc) 1540 continue; 1541 1542 if (!new_crtc_state->connector_mask) { 1543 ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state, 1544 NULL); 1545 if (ret < 0) 1546 return ret; 1547 1548 new_crtc_state->active = false; 1549 } 1550 } 1551 1552 return 0; 1553 } 1554 1555 /* just used from drm-client and atomic-helper: */ 1556 int __drm_atomic_helper_set_config(struct drm_mode_set *set, 1557 struct drm_atomic_state *state) 1558 { 1559 struct drm_crtc_state *crtc_state; 1560 struct drm_plane_state *primary_state; 1561 struct drm_crtc *crtc = set->crtc; 1562 int hdisplay, vdisplay; 1563 int ret; 1564 1565 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1566 if (IS_ERR(crtc_state)) 1567 return PTR_ERR(crtc_state); 1568 1569 primary_state = drm_atomic_get_plane_state(state, crtc->primary); 1570 if (IS_ERR(primary_state)) 1571 return PTR_ERR(primary_state); 1572 1573 if (!set->mode) { 1574 WARN_ON(set->fb); 1575 WARN_ON(set->num_connectors); 1576 1577 ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL); 1578 if (ret != 0) 1579 return ret; 1580 1581 crtc_state->active = false; 1582 1583 ret = drm_atomic_set_crtc_for_plane(primary_state, NULL); 1584 if (ret != 0) 1585 return ret; 1586 1587 drm_atomic_set_fb_for_plane(primary_state, NULL); 1588 1589 goto commit; 1590 } 1591 1592 WARN_ON(!set->fb); 1593 WARN_ON(!set->num_connectors); 1594 1595 ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode); 1596 if (ret != 0) 1597 return ret; 1598 1599 crtc_state->active = true; 1600 1601 ret = drm_atomic_set_crtc_for_plane(primary_state, crtc); 1602 if (ret != 0) 1603 return ret; 1604 1605 drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay); 1606 1607 drm_atomic_set_fb_for_plane(primary_state, set->fb); 1608 primary_state->crtc_x = 0; 1609 primary_state->crtc_y = 0; 1610 primary_state->crtc_w = hdisplay; 1611 primary_state->crtc_h = vdisplay; 1612 primary_state->src_x = set->x << 16; 1613 primary_state->src_y = set->y << 16; 1614 if (drm_rotation_90_or_270(primary_state->rotation)) { 1615 primary_state->src_w = vdisplay << 16; 1616 primary_state->src_h = hdisplay << 16; 1617 } else { 1618 primary_state->src_w = hdisplay << 16; 1619 primary_state->src_h = vdisplay << 16; 1620 } 1621 1622 commit: 1623 ret = update_output_state(state, set); 1624 if (ret) 1625 return ret; 1626 1627 return 0; 1628 } 1629 EXPORT_SYMBOL(__drm_atomic_helper_set_config); 1630 1631 /** 1632 * drm_atomic_print_new_state - prints drm atomic state 1633 * @state: atomic configuration to check 1634 * @p: drm printer 1635 * 1636 * This functions prints the drm atomic state snapshot using the drm printer 1637 * which is passed to it. This snapshot can be used for debugging purposes. 1638 * 1639 * Note that this function looks into the new state objects and hence its not 1640 * safe to be used after the call to drm_atomic_helper_commit_hw_done(). 
1641 */ 1642 void drm_atomic_print_new_state(const struct drm_atomic_state *state, 1643 struct drm_printer *p) 1644 { 1645 struct drm_plane *plane; 1646 struct drm_plane_state *plane_state; 1647 struct drm_crtc *crtc; 1648 struct drm_crtc_state *crtc_state; 1649 struct drm_connector *connector; 1650 struct drm_connector_state *connector_state; 1651 int i; 1652 1653 if (!p) { 1654 drm_err(state->dev, "invalid drm printer\n"); 1655 return; 1656 } 1657 1658 drm_dbg_atomic(state->dev, "checking %p\n", state); 1659 1660 for_each_new_plane_in_state(state, plane, plane_state, i) 1661 drm_atomic_plane_print_state(p, plane_state); 1662 1663 for_each_new_crtc_in_state(state, crtc, crtc_state, i) 1664 drm_atomic_crtc_print_state(p, crtc_state); 1665 1666 for_each_new_connector_in_state(state, connector, connector_state, i) 1667 drm_atomic_connector_print_state(p, connector_state); 1668 } 1669 EXPORT_SYMBOL(drm_atomic_print_new_state); 1670 1671 static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p, 1672 bool take_locks) 1673 { 1674 struct drm_mode_config *config = &dev->mode_config; 1675 struct drm_plane *plane; 1676 struct drm_crtc *crtc; 1677 struct drm_connector *connector; 1678 struct drm_connector_list_iter conn_iter; 1679 1680 if (!drm_drv_uses_atomic_modeset(dev)) 1681 return; 1682 1683 list_for_each_entry(plane, &config->plane_list, head) { 1684 if (take_locks) 1685 drm_modeset_lock(&plane->mutex, NULL); 1686 drm_atomic_plane_print_state(p, plane->state); 1687 if (take_locks) 1688 drm_modeset_unlock(&plane->mutex); 1689 } 1690 1691 list_for_each_entry(crtc, &config->crtc_list, head) { 1692 if (take_locks) 1693 drm_modeset_lock(&crtc->mutex, NULL); 1694 drm_atomic_crtc_print_state(p, crtc->state); 1695 if (take_locks) 1696 drm_modeset_unlock(&crtc->mutex); 1697 } 1698 1699 drm_connector_list_iter_begin(dev, &conn_iter); 1700 if (take_locks) 1701 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1702 drm_for_each_connector_iter(connector, &conn_iter) 1703 drm_atomic_connector_print_state(p, connector->state); 1704 if (take_locks) 1705 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1706 drm_connector_list_iter_end(&conn_iter); 1707 } 1708 1709 /** 1710 * drm_state_dump - dump entire device atomic state 1711 * @dev: the drm device 1712 * @p: where to print the state to 1713 * 1714 * Just for debugging. Drivers might want an option to dump state 1715 * to dmesg in case of error irq's. (Hint, you probably want to 1716 * ratelimit this!) 1717 * 1718 * The caller must wrap this drm_modeset_lock_all_ctx() and 1719 * drm_modeset_drop_locks(). If this is called from error irq handler, it should 1720 * not be enabled by default - if you are debugging errors you might 1721 * not care that this is racey, but calling this without all modeset locks held 1722 * is inherently unsafe. 1723 */ 1724 void drm_state_dump(struct drm_device *dev, struct drm_printer *p) 1725 { 1726 __drm_state_dump(dev, p, false); 1727 } 1728 EXPORT_SYMBOL(drm_state_dump); 1729 1730 #ifdef CONFIG_DEBUG_FS 1731 static int drm_state_info(struct seq_file *m, void *data) 1732 { 1733 struct drm_info_node *node = (struct drm_info_node *) m->private; 1734 struct drm_device *dev = node->minor->dev; 1735 struct drm_printer p = drm_seq_file_printer(m); 1736 1737 __drm_state_dump(dev, &p, true); 1738 1739 return 0; 1740 } 1741 1742 /* any use in debugfs files to dump individual planes/crtc/etc? 
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

void drm_atomic_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(drm_atomic_debugfs_list,
				 ARRAY_SIZE(drm_atomic_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif