/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
#include <linux/sync_file.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

void __drm_crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}
EXPORT_SYMBOL(__drm_crtc_commit_free);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
	kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	kref_init(&state->ref);

	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->funcs->atomic_state_alloc) {
		struct drm_atomic_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);

/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This should only be used by drivers which are still subclassing
 * &drm_atomic_state and haven't switched to &drm_private_state yet.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks, so someone else could sneak in and change the current modeset
 * configuration. That means the state assembled in @state is no longer an
 * atomic update to the current state, but to some arbitrary earlier state,
 * which could break assumptions the driver's
 * &drm_mode_config_funcs.atomic_check likely relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);
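
/*
 * A minimal sketch of the restart loop described above, as a driver-internal
 * update path might implement it; dummy_populate_state() stands in for
 * whatever code adds object states to the update, everything else is the
 * regular atomic and modeset-locking API:
 *
 *	struct drm_modeset_acquire_ctx ctx;
 *	struct drm_atomic_state *state;
 *	int ret;
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 *	state = drm_atomic_state_alloc(dev);
 *	if (!state)
 *		goto out;
 *	state->acquire_ctx = &ctx;
 *
 * retry:
 *	ret = dummy_populate_state(state);
 *	if (!ret)
 *		ret = drm_atomic_commit(state);
 *	if (ret == -EDEADLK) {
 *		drm_atomic_state_clear(state);
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	drm_atomic_state_put(state);
 * out:
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */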

/**
 * __drm_atomic_state_free - free all memory for an atomic state
 * @ref: This atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void __drm_atomic_state_free(struct kref *ref)
{
	struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
	struct drm_mode_config *config = &state->dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(__drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].old_state = crtc->state;
	state->crtcs[index].new_state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
				 const struct drm_crtc_state *new_crtc_state)
{
	struct drm_crtc *crtc = new_crtc_state->crtc;

	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (new_crtc_state->active && !new_crtc_state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation when a CRTC is off and stays off. It
	 * wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% CPU (or worse, crashing)
	 * when the display pipe is suspended. To avoid all that fun just
	 * reject updates that ask for events, since that likely indicates a
	 * bug in the compositor's drawing loop. This is consistent with the
	 * vblank IOCTL and legacy page_flip IOCTL, which also reject service
	 * on a disabled pipe.
	 */
	if (new_crtc_state->event &&
	    !new_crtc_state->active && !old_crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	return 0;
}

static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}

static int drm_atomic_connector_check(struct drm_connector *connector,
		struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_writeback_job *writeback_job = state->writeback_job;
	const struct drm_display_info *info = &connector->display_info;

	state->max_bpc = info->bpc ? info->bpc : 8;
	if (connector->max_bpc_property)
		state->max_bpc = min(state->max_bpc, state->max_requested_bpc);

	if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
		return 0;

	if (writeback_job->fb && !state->crtc) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	if (state->crtc)
		crtc_state = drm_atomic_get_existing_crtc_state(state->state,
								state->crtc);

	if (writeback_job->fb && !crtc_state->active) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
				 connector->base.id, connector->name,
				 state->crtc->base.id);
		return -EINVAL;
	}

	if (writeback_job->out_fence && !writeback_job->fb) {
		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
				 connector->base.id, connector->name);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the
 * state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	/* the legacy pointers should never be set */
	WARN_ON(plane->fb);
	WARN_ON(plane->old_fb);
	WARN_ON(plane->crtc);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	state->planes[index].old_state = plane->state;
	state->planes[index].new_state = plane_state;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);

static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
		     const struct drm_plane_state *new_plane_state)
{
	if (!old_plane_state->crtc || !new_plane_state->crtc)
		return false;

	if (old_plane_state->crtc == new_plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @old_plane_state: old plane state to check
 * @new_plane_state: new plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
				  const struct drm_plane_state *new_plane_state)
{
	struct drm_plane *plane = new_plane_state->plane;
	struct drm_crtc *crtc = new_plane_state->crtc;
	const struct drm_framebuffer *fb = new_plane_state->fb;
	unsigned int fb_width, fb_height;
	struct drm_mode_rect *clips;
	uint32_t num_clips;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (crtc && !fb) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	} else if (fb && !crtc) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
		DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
				 crtc->base.id, crtc->name,
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, fb->format->format,
					   fb->modifier);
	if (ret) {
		struct drm_format_name_buf format_name;
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
				 plane->base.id, plane->name,
				 drm_get_format_name(fb->format->format,
						     &format_name),
				 fb->modifier);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (new_plane_state->crtc_w > INT_MAX ||
	    new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
	    new_plane_state->crtc_h > INT_MAX ||
	    new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
				 plane->base.id, plane->name,
				 new_plane_state->crtc_w, new_plane_state->crtc_h,
				 new_plane_state->crtc_x, new_plane_state->crtc_y);
		return -ERANGE;
	}

	fb_width = fb->width << 16;
	fb_height = fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (new_plane_state->src_w > fb_width ||
	    new_plane_state->src_x > fb_width - new_plane_state->src_w ||
	    new_plane_state->src_h > fb_height ||
	    new_plane_state->src_y > fb_height - new_plane_state->src_h) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
				 plane->base.id, plane->name,
				 new_plane_state->src_w >> 16,
				 ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
				 new_plane_state->src_h >> 16,
				 ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
				 new_plane_state->src_x >> 16,
				 ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
				 new_plane_state->src_y >> 16,
				 ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
				 fb->width, fb->height);
		return -ENOSPC;
	}

	clips = drm_plane_get_damage_clips(new_plane_state);
	num_clips = drm_plane_get_damage_clips_count(new_plane_state);

	/* Make sure damage clips are valid and inside the fb. */
	while (num_clips > 0) {
		if (clips->x1 >= clips->x2 ||
		    clips->y1 >= clips->y2 ||
		    clips->x1 < 0 ||
		    clips->y1 < 0 ||
		    clips->x2 > fb_width ||
		    clips->y2 > fb_height) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
					 plane->base.id, plane->name, clips->x1,
					 clips->y1, clips->x2, clips->y2);
			return -EINVAL;
		}
		clips++;
		num_clips--;
	}

	if (plane_switching_crtc(old_plane_state, new_plane_state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
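
/*
 * A short illustration of the coordinate spaces checked in
 * drm_atomic_plane_check(): the source rectangle is in 16.16 fixed point,
 * the CRTC rectangle in integer pixels. As a rough sketch, an unscaled
 * scanout of a full framebuffer would be described as:
 *
 *	plane_state->src_x = 0;
 *	plane_state->src_y = 0;
 *	plane_state->src_w = fb->width << 16;
 *	plane_state->src_h = fb->height << 16;
 *	plane_state->crtc_x = 0;
 *	plane_state->crtc_y = 0;
 *	plane_state->crtc_w = fb->width;
 *	plane_state->crtc_h = fb->height;
 */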

static void drm_atomic_plane_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct drm_plane *plane = state->plane;
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dest = drm_plane_state_dest(state);

	drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
	drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
	if (state->fb)
		drm_framebuffer_print_info(p, 2, state->fb);
	drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
	drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
	drm_printf(p, "\trotation=%x\n", state->rotation);
	drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
	drm_printf(p, "\tcolor-encoding=%s\n",
		   drm_get_color_encoding_name(state->color_encoding));
	drm_printf(p, "\tcolor-range=%s\n",
		   drm_get_color_range_name(state->color_range));

	if (plane->funcs->atomic_print_state)
		plane->funcs->atomic_print_state(p, state);
}

/**
 * DOC: handling driver private state
 *
 * Very often the DRM objects exposed to userspace in the atomic modeset API
 * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
 * underlying hardware. Especially for any kind of shared resources (e.g.
 * shared clocks, scaler units, bandwidth and fifo limits shared among a group
 * of planes or CRTCs, and so on) it makes sense to model these as independent
 * objects. Drivers then need to do similar state tracking and commit ordering
 * for such private (since not exposed to userspace) objects as the atomic
 * core and helpers already provide for connectors, planes and CRTCs.
 *
 * To make this easier on drivers the atomic core provides some support to
 * track driver private state objects using struct &drm_private_obj, with the
 * associated state struct &drm_private_state.
 *
 * Similar to userspace-exposed objects, private state structures can be
 * acquired by calling drm_atomic_get_private_obj_state(). Since this function
 * does not take care of locking, drivers should wrap it for each type of
 * private state object they have with the required call to drm_modeset_lock()
 * for the corresponding &drm_modeset_lock.
 *
 * All private state structures contained in a &drm_atomic_state update can be
 * iterated using for_each_oldnew_private_obj_in_state(),
 * for_each_new_private_obj_in_state() and
 * for_each_old_private_obj_in_state(). Drivers are recommended to wrap these
 * for each type of driver private state object they have, filtering on
 * &drm_private_obj.funcs using for_each_if(), at least if they want to
 * iterate over all objects of a given type.
 *
 * An earlier way to handle driver private state was by subclassing struct
 * &drm_atomic_state. But since that encourages non-standard ways to implement
 * the check/commit split atomic requires (by using e.g. "check and rollback
 * or commit instead" of "duplicate state, check, then either commit or
 * release duplicated state") it is deprecated in favour of using
 * &drm_private_state.
 */
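
/*
 * A minimal sketch of such a wrapper for a hypothetical "dummy_mux" object
 * embedded in a driver structure; only drm_modeset_lock() and
 * drm_atomic_get_private_obj_state() are existing DRM calls here, the
 * dummy_mux names are made up for the example:
 *
 *	static struct dummy_mux_state *
 *	dummy_mux_get_state(struct drm_atomic_state *state, struct dummy_mux *mux)
 *	{
 *		struct drm_private_state *priv_state;
 *		int ret;
 *
 *		ret = drm_modeset_lock(&mux->lock, state->acquire_ctx);
 *		if (ret)
 *			return ERR_PTR(ret);
 *
 *		priv_state = drm_atomic_get_private_obj_state(state, &mux->obj);
 *		if (IS_ERR(priv_state))
 *			return ERR_CAST(priv_state);
 *
 *		return container_of(priv_state, struct dummy_mux_state, base);
 *	}
 */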

/**
 * drm_atomic_private_obj_init - initialize private object
 * @obj: private object
 * @state: initial private object state
 * @funcs: pointer to the struct of function pointers that identify the object
 * type
 *
 * Initialize the private object, which can be embedded into any
 * driver private object that needs its own atomic state.
 */
void
drm_atomic_private_obj_init(struct drm_private_obj *obj,
			    struct drm_private_state *state,
			    const struct drm_private_state_funcs *funcs)
{
	memset(obj, 0, sizeof(*obj));

	obj->state = state;
	obj->funcs = funcs;
}
EXPORT_SYMBOL(drm_atomic_private_obj_init);

/**
 * drm_atomic_private_obj_fini - finalize private object
 * @obj: private object
 *
 * Finalize the private object.
 */
void
drm_atomic_private_obj_fini(struct drm_private_obj *obj)
{
	obj->funcs->atomic_destroy_state(obj, obj->state);
}
EXPORT_SYMBOL(drm_atomic_private_obj_fini);

/**
 * drm_atomic_get_private_obj_state - get private object state
 * @state: global atomic state
 * @obj: private object to get the state for
 *
 * This function returns the private object state for the given private object,
 * allocating the state if needed. It does not grab any locks as the caller is
 * expected to take care of any required locking.
 *
 * RETURNS:
 *
 * Either the allocated state or the error code encoded into a pointer.
 */
struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
				 struct drm_private_obj *obj)
{
	int index, num_objs, i;
	size_t size;
	struct __drm_private_objs_state *arr;
	struct drm_private_state *obj_state;

	for (i = 0; i < state->num_private_objs; i++)
		if (obj == state->private_objs[i].ptr)
			return state->private_objs[i].state;

	num_objs = state->num_private_objs + 1;
	size = sizeof(*state->private_objs) * num_objs;
	arr = krealloc(state->private_objs, size, GFP_KERNEL);
	if (!arr)
		return ERR_PTR(-ENOMEM);

	state->private_objs = arr;
	index = state->num_private_objs;
	memset(&state->private_objs[index], 0, sizeof(*state->private_objs));

	obj_state = obj->funcs->atomic_duplicate_state(obj);
	if (!obj_state)
		return ERR_PTR(-ENOMEM);

	state->private_objs[index].state = obj_state;
	state->private_objs[index].old_state = obj->state;
	state->private_objs[index].new_state = obj_state;
	state->private_objs[index].ptr = obj;
	obj_state->state = state;

	state->num_private_objs = num_objs;

	DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
			 obj, obj_state, state);

	return obj_state;
}
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);

/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_get(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].old_state = connector->state;
	state->connectors[index].new_state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
			 connector->base.id, connector->name,
			 connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);

static void drm_atomic_connector_print_state(struct drm_printer *p,
		const struct drm_connector_state *state)
{
	struct drm_connector *connector = state->connector;

	drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
	drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");

	if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
		if (state->writeback_job && state->writeback_job->fb)
			drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);

	if (connector->funcs->atomic_print_state)
		connector->funcs->atomic_print_state(p, state);
}

/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look
	 * at the connector_mask in crtc_state.
	 */
	drm_connector_list_iter_begin(state->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			drm_connector_list_iter_end(&conn_iter);
			return PTR_ERR(conn_state);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any), adding all the plane states
 * for a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	const struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}
	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
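
/*
 * A minimal sketch of the intended use, re-checking all planes on a CRTC
 * from a driver's &drm_crtc_helper_funcs.atomic_check hook when a full
 * modeset is needed; the dummy_ prefix marks made-up driver code, the DRM
 * calls are real:
 *
 *	static int dummy_crtc_atomic_check(struct drm_crtc *crtc,
 *					   struct drm_crtc_state *crtc_state)
 *	{
 *		if (drm_atomic_crtc_needs_modeset(crtc_state))
 *			return drm_atomic_add_affected_planes(crtc_state->state,
 *							      crtc);
 *
 *		return 0;
 *	}
 */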

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state;
	struct drm_plane_state *new_plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc_state *new_crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	for_each_new_connector_in_state(state, conn, conn_state, i) {
		ret = drm_atomic_connector_check(conn, conn_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
					 conn->base.id, conn->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check) {
		ret = config->funcs->atomic_check(state->dev, state);

		if (ret) {
			DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
					 state, ret);
			return ret;
		}
	}

	if (!state->allow_modeset) {
		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);

/**
 * drm_atomic_nonblocking_commit - atomic nonblocking commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * This function will take its own reference on @state.
 * Callers should always release their reference with drm_atomic_state_put().
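 *
 * A typical call site (illustrative) therefore pairs the commit with the
 * final reference drop, regardless of whether the commit succeeded::
 *
 *	ret = drm_atomic_nonblocking_commit(state);
 *	drm_atomic_state_put(state);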
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);

void drm_atomic_print_state(const struct drm_atomic_state *state)
{
	struct drm_printer p = drm_info_printer(state->dev->dev);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_new_plane_in_state(state, plane, plane_state, i)
		drm_atomic_plane_print_state(&p, plane_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		drm_atomic_crtc_print_state(&p, crtc_state);

	for_each_new_connector_in_state(state, connector, connector_state, i)
		drm_atomic_connector_print_state(&p, connector_state);
}

static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
			     bool take_locks)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!drm_drv_uses_atomic_modeset(dev))
		return;

	list_for_each_entry(plane, &config->plane_list, head) {
		if (take_locks)
			drm_modeset_lock(&plane->mutex, NULL);
		drm_atomic_plane_print_state(p, plane->state);
		if (take_locks)
			drm_modeset_unlock(&plane->mutex);
	}

	list_for_each_entry(crtc, &config->crtc_list, head) {
		if (take_locks)
			drm_modeset_lock(&crtc->mutex, NULL);
		drm_atomic_crtc_print_state(p, crtc->state);
		if (take_locks)
			drm_modeset_unlock(&crtc->mutex);
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	if (take_locks)
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_for_each_connector_iter(connector, &conn_iter)
		drm_atomic_connector_print_state(p, connector->state);
	if (take_locks)
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
	drm_connector_list_iter_end(&conn_iter);
}

/**
 * drm_state_dump - dump entire device atomic state
 * @dev: the drm device
 * @p: where to print the state to
 *
 * Just for debugging. Drivers might want an option to dump state
 * to dmesg in case of error IRQs. (Hint, you probably want to
 * ratelimit this!)
 *
 * The caller must hold all modeset locks, e.g. via drm_modeset_lock_all(),
 * or, if this is called from an error IRQ handler, it should not be enabled
 * by default. (I.e. if you are debugging errors you might not care that this
 * is racy. But calling this without all modeset locks held is not inherently
 * safe.)
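 *
 * An illustrative, ratelimited call from an error interrupt handler could
 * look like this (the dump_rs name is made up for the example)::
 *
 *	static DEFINE_RATELIMIT_STATE(dump_rs, DEFAULT_RATELIMIT_INTERVAL,
 *				      DEFAULT_RATELIMIT_BURST);
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	if (__ratelimit(&dump_rs))
 *		drm_state_dump(dev, &p);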
 */
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
	__drm_state_dump(dev, p, false);
}
EXPORT_SYMBOL(drm_state_dump);

#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	__drm_state_dump(dev, &p, true);

	return 0;
}

/* any use in debugfs files to dump individual planes/crtc/etc? */
static const struct drm_info_list drm_atomic_debugfs_list[] = {
	{"state", drm_state_info, 0},
};

int drm_atomic_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(drm_atomic_debugfs_list,
					ARRAY_SIZE(drm_atomic_debugfs_list),
					minor->debugfs_root, minor);
}
#endif