1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 #include <drm/drm_atomic.h> 29 #include <drm/drm_atomic_helper.h> 30 #include <drm/drm_damage_helper.h> 31 #include <drm/drm_fourcc.h> 32 #include <drm/drm_plane_helper.h> 33 #include <drm/drm_rect.h> 34 #include <drm/drm_sysfs.h> 35 #include <drm/drm_vblank.h> 36 37 #include "vmwgfx_kms.h" 38 39 void vmw_du_cleanup(struct vmw_display_unit *du) 40 { 41 drm_plane_cleanup(&du->primary); 42 drm_plane_cleanup(&du->cursor); 43 44 drm_connector_unregister(&du->connector); 45 drm_crtc_cleanup(&du->crtc); 46 drm_encoder_cleanup(&du->encoder); 47 drm_connector_cleanup(&du->connector); 48 } 49 50 /* 51 * Display Unit Cursor functions 52 */ 53 54 static int vmw_cursor_update_image(struct vmw_private *dev_priv, 55 u32 *image, u32 width, u32 height, 56 u32 hotspotX, u32 hotspotY) 57 { 58 struct { 59 u32 cmd; 60 SVGAFifoCmdDefineAlphaCursor cursor; 61 } *cmd; 62 u32 image_size = width * height * 4; 63 u32 cmd_size = sizeof(*cmd) + image_size; 64 65 if (!image) 66 return -EINVAL; 67 68 cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); 69 if (unlikely(cmd == NULL)) 70 return -ENOMEM; 71 72 memset(cmd, 0, sizeof(*cmd)); 73 74 memcpy(&cmd[1], image, image_size); 75 76 cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR; 77 cmd->cursor.id = 0; 78 cmd->cursor.width = width; 79 cmd->cursor.height = height; 80 cmd->cursor.hotspotX = hotspotX; 81 cmd->cursor.hotspotY = hotspotY; 82 83 vmw_cmd_commit_flush(dev_priv, cmd_size); 84 85 return 0; 86 } 87 88 static int vmw_cursor_update_bo(struct vmw_private *dev_priv, 89 struct vmw_buffer_object *bo, 90 u32 width, u32 height, 91 u32 hotspotX, u32 hotspotY) 92 { 93 struct ttm_bo_kmap_obj map; 94 unsigned long kmap_offset; 95 unsigned long kmap_num; 96 void *virtual; 97 bool dummy; 98 int ret; 99 100 kmap_offset = 0; 101 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; 102 103 ret = ttm_bo_reserve(&bo->base, true, false, NULL); 104 if (unlikely(ret != 0)) { 105 DRM_ERROR("reserve failed\n"); 106 return -EINVAL; 107 } 108 109 ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map); 110 if (unlikely(ret != 0)) 111 goto err_unreserve; 112 113 virtual = 
ttm_kmap_obj_virtual(&map, &dummy); 114 ret = vmw_cursor_update_image(dev_priv, virtual, width, height, 115 hotspotX, hotspotY); 116 117 ttm_bo_kunmap(&map); 118 err_unreserve: 119 ttm_bo_unreserve(&bo->base); 120 121 return ret; 122 } 123 124 125 static void vmw_cursor_update_position(struct vmw_private *dev_priv, 126 bool show, int x, int y) 127 { 128 uint32_t count; 129 130 spin_lock(&dev_priv->cursor_lock); 131 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0); 132 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); 133 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); 134 count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); 135 vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); 136 spin_unlock(&dev_priv->cursor_lock); 137 } 138 139 140 void vmw_kms_cursor_snoop(struct vmw_surface *srf, 141 struct ttm_object_file *tfile, 142 struct ttm_buffer_object *bo, 143 SVGA3dCmdHeader *header) 144 { 145 struct ttm_bo_kmap_obj map; 146 unsigned long kmap_offset; 147 unsigned long kmap_num; 148 SVGA3dCopyBox *box; 149 unsigned box_count; 150 void *virtual; 151 bool dummy; 152 struct vmw_dma_cmd { 153 SVGA3dCmdHeader header; 154 SVGA3dCmdSurfaceDMA dma; 155 } *cmd; 156 int i, ret; 157 158 cmd = container_of(header, struct vmw_dma_cmd, header); 159 160 /* No snooper installed */ 161 if (!srf->snooper.image) 162 return; 163 164 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { 165 DRM_ERROR("face and mipmap for cursors should never != 0\n"); 166 return; 167 } 168 169 if (cmd->header.size < 64) { 170 DRM_ERROR("at least one full copy box must be given\n"); 171 return; 172 } 173 174 box = (SVGA3dCopyBox *)&cmd[1]; 175 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / 176 sizeof(SVGA3dCopyBox); 177 178 if (cmd->dma.guest.ptr.offset % PAGE_SIZE || 179 box->x != 0 || box->y != 0 || box->z != 0 || 180 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || 181 box->d != 1 || box_count != 1) { 182 /* TODO handle non-page-aligned offsets */ 183 /* TODO handle non-zero dst & src coordinates */ 184 /* TODO handle more than one copy */ 185 DRM_ERROR("Can't snoop dma request for cursor!\n"); 186 DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", 187 box->srcx, box->srcy, box->srcz, 188 box->x, box->y, box->z, 189 box->w, box->h, box->d, box_count, 190 cmd->dma.guest.ptr.offset); 191 return; 192 } 193 194 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; 195 kmap_num = (64*64*4) >> PAGE_SHIFT; 196 197 ret = ttm_bo_reserve(bo, true, false, NULL); 198 if (unlikely(ret != 0)) { 199 DRM_ERROR("reserve failed\n"); 200 return; 201 } 202 203 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); 204 if (unlikely(ret != 0)) 205 goto err_unreserve; 206 207 virtual = ttm_kmap_obj_virtual(&map, &dummy); 208 209 if (box->w == 64 && cmd->dma.guest.pitch == 64*4) { 210 memcpy(srf->snooper.image, virtual, 64*64*4); 211 } else { 212 /* snooper.image is a u32 pointer, so + i * 64 steps one 64-pixel row. */ 213 for (i = 0; i < box->h; i++) 214 memcpy(srf->snooper.image + i * 64, 215 virtual + i * cmd->dma.guest.pitch, 216 box->w * 4); 217 } 218 219 srf->snooper.age++; 220 221 ttm_bo_kunmap(&map); 222 err_unreserve: 223 ttm_bo_unreserve(bo); 224 } 225 226 /** 227 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots 228 * 229 * @dev_priv: Pointer to the device private struct. 230 * 231 * Clears all legacy hotspots.
232 */ 233 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) 234 { 235 struct drm_device *dev = &dev_priv->drm; 236 struct vmw_display_unit *du; 237 struct drm_crtc *crtc; 238 239 drm_modeset_lock_all(dev); 240 drm_for_each_crtc(crtc, dev) { 241 du = vmw_crtc_to_du(crtc); 242 243 du->hotspot_x = 0; 244 du->hotspot_y = 0; 245 } 246 drm_modeset_unlock_all(dev); 247 } 248 249 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) 250 { 251 struct drm_device *dev = &dev_priv->drm; 252 struct vmw_display_unit *du; 253 struct drm_crtc *crtc; 254 255 mutex_lock(&dev->mode_config.mutex); 256 257 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 258 du = vmw_crtc_to_du(crtc); 259 if (!du->cursor_surface || 260 du->cursor_age == du->cursor_surface->snooper.age) 261 continue; 262 263 du->cursor_age = du->cursor_surface->snooper.age; 264 vmw_cursor_update_image(dev_priv, 265 du->cursor_surface->snooper.image, 266 64, 64, 267 du->hotspot_x + du->core_hotspot_x, 268 du->hotspot_y + du->core_hotspot_y); 269 } 270 271 mutex_unlock(&dev->mode_config.mutex); 272 } 273 274 275 void vmw_du_cursor_plane_destroy(struct drm_plane *plane) 276 { 277 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); 278 279 drm_plane_cleanup(plane); 280 } 281 282 283 void vmw_du_primary_plane_destroy(struct drm_plane *plane) 284 { 285 drm_plane_cleanup(plane); 286 287 /* Planes are static in our case so we don't free them */ 288 } 289 290 291 /** 292 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface 293 * 294 * @vps: plane state associated with the display surface 295 * @unreference: true if we also want to unreference the display surface. 296 */ 297 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps, 298 bool unreference) 299 { 300 if (vps->surf) { 301 if (vps->pinned) { 302 vmw_resource_unpin(&vps->surf->res); 303 vps->pinned--; 304 } 305 306 if (unreference) { 307 if (vps->pinned) 308 DRM_ERROR("Surface still pinned\n"); 309 vmw_surface_unreference(&vps->surf); 310 } 311 } 312 } 313 314 315 /** 316 * vmw_du_plane_cleanup_fb - Unpins the framebuffer surface 317 * 318 * @plane: display plane 319 * @old_state: Contains the FB to clean up 320 * 321 * Unpins the framebuffer surface, if any. 322 */ 323 324 325 void 326 vmw_du_plane_cleanup_fb(struct drm_plane *plane, 327 struct drm_plane_state *old_state) 328 { 329 struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); 330 331 vmw_du_plane_unpin_surf(vps, false); 332 } 333 334 335 /** 336 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it 337 * 338 * @plane: display plane 339 * @new_state: info on the new plane state, including the FB 340 * 341 * Returns 0 on success 342 */ 343 int 344 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, 345 struct drm_plane_state *new_state) 346 { 347 struct drm_framebuffer *fb = new_state->fb; 348 struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); 349 350 351 if (vps->surf) 352 vmw_surface_unreference(&vps->surf); 353 354 if (vps->bo) 355 vmw_bo_unreference(&vps->bo); 356 357 if (fb) { 358 if (vmw_framebuffer_to_vfb(fb)->bo) { 359 vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer; 360 vmw_bo_reference(vps->bo); 361 } else { 362 vps->surf = vmw_framebuffer_to_vfbs(fb)->surface; 363 vmw_surface_reference(vps->surf); 364 } 365 } 366 367 return 0; 368 } 369 370 371 void 372 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, 373 struct drm_plane_state *old_state) 374 { 375 struct drm_crtc *crtc = plane->state->crtc ?:
old_state->crtc; 376 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 377 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 378 struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state); 379 s32 hotspot_x, hotspot_y; 380 int ret = 0; 381 382 383 hotspot_x = du->hotspot_x; 384 hotspot_y = du->hotspot_y; 385 386 if (plane->state->fb) { 387 hotspot_x += plane->state->fb->hot_x; 388 hotspot_y += plane->state->fb->hot_y; 389 } 390 391 du->cursor_surface = vps->surf; 392 du->cursor_bo = vps->bo; 393 394 if (vps->surf) { 395 du->cursor_age = du->cursor_surface->snooper.age; 396 397 ret = vmw_cursor_update_image(dev_priv, 398 vps->surf->snooper.image, 399 64, 64, hotspot_x, 400 hotspot_y); 401 } else if (vps->bo) { 402 ret = vmw_cursor_update_bo(dev_priv, vps->bo, 403 plane->state->crtc_w, 404 plane->state->crtc_h, 405 hotspot_x, hotspot_y); 406 } else { 407 vmw_cursor_update_position(dev_priv, false, 0, 0); 408 return; 409 } 410 411 if (!ret) { 412 du->cursor_x = plane->state->crtc_x + du->set_gui_x; 413 du->cursor_y = plane->state->crtc_y + du->set_gui_y; 414 415 vmw_cursor_update_position(dev_priv, true, 416 du->cursor_x + hotspot_x, 417 du->cursor_y + hotspot_y); 418 419 du->core_hotspot_x = hotspot_x - du->hotspot_x; 420 du->core_hotspot_y = hotspot_y - du->hotspot_y; 421 } else { 422 DRM_ERROR("Failed to update cursor image\n"); 423 } 424 } 425 426 427 /** 428 * vmw_du_primary_plane_atomic_check - check if the new state is okay 429 * 430 * @plane: display plane 431 * @state: info on the new plane state, including the FB 432 * 433 * Check if the new state is settable given the current state. Other 434 * than what the atomic helper checks, we care about crtc fitting 435 * the FB and maintaining one active framebuffer. 436 * 437 * Returns 0 on success 438 */ 439 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, 440 struct drm_plane_state *state) 441 { 442 struct drm_crtc_state *crtc_state = NULL; 443 struct drm_framebuffer *new_fb = state->fb; 444 int ret; 445 446 if (state->crtc) 447 crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); 448 449 ret = drm_atomic_helper_check_plane_state(state, crtc_state, 450 DRM_PLANE_HELPER_NO_SCALING, 451 DRM_PLANE_HELPER_NO_SCALING, 452 false, true); 453 454 if (!ret && new_fb) { 455 struct drm_crtc *crtc = state->crtc; 456 struct vmw_connector_state *vcs; 457 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 458 459 vcs = vmw_connector_state_to_vcs(du->connector.state); 460 } 461 462 463 return ret; 464 } 465 466 467 /** 468 * vmw_du_cursor_plane_atomic_check - check if the new state is okay 469 * 470 * @plane: cursor plane 471 * @state: info on the new plane state 472 * 473 * This is a chance to fail if the new cursor state does not fit 474 * our requirements. 
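 * In practice the checks below require a 64x64 cursor and, for a
 * surface-backed cursor, a surface with a snooper image, since the
 * legacy cursor commands and the snooper code elsewhere in this file
 * assume a fixed 64x64 ARGB image.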
475 * 476 * Returns 0 on success 477 */ 478 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, 479 struct drm_plane_state *new_state) 480 { 481 int ret = 0; 482 struct drm_crtc_state *crtc_state = NULL; 483 struct vmw_surface *surface = NULL; 484 struct drm_framebuffer *fb = new_state->fb; 485 486 if (new_state->crtc) 487 crtc_state = drm_atomic_get_new_crtc_state(new_state->state, 488 new_state->crtc); 489 490 ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, 491 DRM_PLANE_HELPER_NO_SCALING, 492 DRM_PLANE_HELPER_NO_SCALING, 493 true, true); 494 if (ret) 495 return ret; 496 497 /* Turning off */ 498 if (!fb) 499 return 0; 500 501 /* A lot of the code assumes this */ 502 if (new_state->crtc_w != 64 || new_state->crtc_h != 64) { 503 DRM_ERROR("Invalid cursor dimensions (%d, %d)\n", 504 new_state->crtc_w, new_state->crtc_h); 505 ret = -EINVAL; 506 } 507 508 if (!vmw_framebuffer_to_vfb(fb)->bo) 509 surface = vmw_framebuffer_to_vfbs(fb)->surface; 510 511 if (surface && !surface->snooper.image) { 512 DRM_ERROR("surface not suitable for cursor\n"); 513 ret = -EINVAL; 514 } 515 516 return ret; 517 } 518 519 520 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, 521 struct drm_atomic_state *state) 522 { 523 struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state, 524 crtc); 525 struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc); 526 int connector_mask = drm_connector_mask(&du->connector); 527 bool has_primary = new_state->plane_mask & 528 drm_plane_mask(crtc->primary); 529 530 /* We always want to have an active plane with an active CRTC */ 531 if (has_primary != new_state->enable) 532 return -EINVAL; 533 534 535 if (new_state->connector_mask != connector_mask && 536 new_state->connector_mask != 0) { 537 DRM_ERROR("Invalid connectors configuration\n"); 538 return -EINVAL; 539 } 540 541 /* 542 * Our virtual device does not have a dot clock, so use the logical 543 * clock value as the dot clock. 544 */ 545 if (new_state->mode.crtc_clock == 0) 546 new_state->adjusted_mode.crtc_clock = new_state->mode.clock; 547 548 return 0; 549 } 550 551 552 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, 553 struct drm_atomic_state *state) 554 { 555 } 556 557 558 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, 559 struct drm_atomic_state *state) 560 { 561 struct drm_pending_vblank_event *event = crtc->state->event; 562 563 if (event) { 564 crtc->state->event = NULL; 565 566 spin_lock_irq(&crtc->dev->event_lock); 567 drm_crtc_send_vblank_event(crtc, event); 568 spin_unlock_irq(&crtc->dev->event_lock); 569 } 570 } 571 572 573 /** 574 * vmw_du_crtc_duplicate_state - duplicate crtc state 575 * @crtc: DRM crtc 576 * 577 * Allocates and returns a copy of the crtc state (both common and 578 * vmw-specific) for the specified crtc. 579 * 580 * Returns: The newly allocated crtc state, or NULL on failure. 581 */ 582 struct drm_crtc_state * 583 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc) 584 { 585 struct drm_crtc_state *state; 586 struct vmw_crtc_state *vcs; 587 588 if (WARN_ON(!crtc->state)) 589 return NULL; 590 591 vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL); 592 593 if (!vcs) 594 return NULL; 595 596 state = &vcs->base; 597 598 __drm_atomic_helper_crtc_duplicate_state(crtc, state); 599 600 return state; 601 } 602 603 604 /** 605 * vmw_du_crtc_reset - creates a blank vmw crtc state 606 * @crtc: DRM crtc 607 * 608 * Resets the atomic state for @crtc by freeing the state pointer (which 609 * might be NULL, e.g. 
at driver load time) and allocating a new empty state 610 * object. 611 */ 612 void vmw_du_crtc_reset(struct drm_crtc *crtc) 613 { 614 struct vmw_crtc_state *vcs; 615 616 617 if (crtc->state) { 618 __drm_atomic_helper_crtc_destroy_state(crtc->state); 619 620 kfree(vmw_crtc_state_to_vcs(crtc->state)); 621 } 622 623 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL); 624 625 if (!vcs) { 626 DRM_ERROR("Cannot allocate vmw_crtc_state\n"); 627 return; 628 } 629 630 __drm_atomic_helper_crtc_reset(crtc, &vcs->base); 631 } 632 633 634 /** 635 * vmw_du_crtc_destroy_state - destroy crtc state 636 * @crtc: DRM crtc 637 * @state: state object to destroy 638 * 639 * Destroys the crtc state (both common and vmw-specific) for the 640 * specified crtc. 641 */ 642 void 643 vmw_du_crtc_destroy_state(struct drm_crtc *crtc, 644 struct drm_crtc_state *state) 645 { 646 drm_atomic_helper_crtc_destroy_state(crtc, state); 647 } 648 649 650 /** 651 * vmw_du_plane_duplicate_state - duplicate plane state 652 * @plane: drm plane 653 * 654 * Allocates and returns a copy of the plane state (both common and 655 * vmw-specific) for the specified plane. 656 * 657 * Returns: The newly allocated plane state, or NULL on failure. 658 */ 659 struct drm_plane_state * 660 vmw_du_plane_duplicate_state(struct drm_plane *plane) 661 { 662 struct drm_plane_state *state; 663 struct vmw_plane_state *vps; 664 665 vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL); 666 667 if (!vps) 668 return NULL; 669 670 vps->pinned = 0; 671 vps->cpp = 0; 672 673 /* Each ref counted resource needs to be acquired again */ 674 if (vps->surf) 675 (void) vmw_surface_reference(vps->surf); 676 677 if (vps->bo) 678 (void) vmw_bo_reference(vps->bo); 679 680 state = &vps->base; 681 682 __drm_atomic_helper_plane_duplicate_state(plane, state); 683 684 return state; 685 } 686 687 688 /** 689 * vmw_du_plane_reset - creates a blank vmw plane state 690 * @plane: drm plane 691 * 692 * Resets the atomic state for @plane by freeing the state pointer (which might 693 * be NULL, e.g. at driver load time) and allocating a new empty state object. 694 */ 695 void vmw_du_plane_reset(struct drm_plane *plane) 696 { 697 struct vmw_plane_state *vps; 698 699 700 if (plane->state) 701 vmw_du_plane_destroy_state(plane, plane->state); 702 703 vps = kzalloc(sizeof(*vps), GFP_KERNEL); 704 705 if (!vps) { 706 DRM_ERROR("Cannot allocate vmw_plane_state\n"); 707 return; 708 } 709 710 __drm_atomic_helper_plane_reset(plane, &vps->base); 711 } 712 713 714 /** 715 * vmw_du_plane_destroy_state - destroy plane state 716 * @plane: DRM plane 717 * @state: state object to destroy 718 * 719 * Destroys the plane state (both common and vmw-specific) for the 720 * specified plane. 721 */ 722 void 723 vmw_du_plane_destroy_state(struct drm_plane *plane, 724 struct drm_plane_state *state) 725 { 726 struct vmw_plane_state *vps = vmw_plane_state_to_vps(state); 727 728 729 /* Should have been freed by cleanup_fb */ 730 if (vps->surf) 731 vmw_surface_unreference(&vps->surf); 732 733 if (vps->bo) 734 vmw_bo_unreference(&vps->bo); 735 736 drm_atomic_helper_plane_destroy_state(plane, state); 737 } 738 739 740 /** 741 * vmw_du_connector_duplicate_state - duplicate connector state 742 * @connector: DRM connector 743 * 744 * Allocates and returns a copy of the connector state (both common and 745 * vmw-specific) for the specified connector. 746 * 747 * Returns: The newly allocated connector state, or NULL on failure.
748 */ 749 struct drm_connector_state * 750 vmw_du_connector_duplicate_state(struct drm_connector *connector) 751 { 752 struct drm_connector_state *state; 753 struct vmw_connector_state *vcs; 754 755 if (WARN_ON(!connector->state)) 756 return NULL; 757 758 vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL); 759 760 if (!vcs) 761 return NULL; 762 763 state = &vcs->base; 764 765 __drm_atomic_helper_connector_duplicate_state(connector, state); 766 767 return state; 768 } 769 770 771 /** 772 * vmw_du_connector_reset - creates a blank vmw connector state 773 * @connector: DRM connector 774 * 775 * Resets the atomic state for @connector by freeing the state pointer (which 776 * might be NULL, e.g. at driver load time) and allocating a new empty state 777 * object. 778 */ 779 void vmw_du_connector_reset(struct drm_connector *connector) 780 { 781 struct vmw_connector_state *vcs; 782 783 784 if (connector->state) { 785 __drm_atomic_helper_connector_destroy_state(connector->state); 786 787 kfree(vmw_connector_state_to_vcs(connector->state)); 788 } 789 790 vcs = kzalloc(sizeof(*vcs), GFP_KERNEL); 791 792 if (!vcs) { 793 DRM_ERROR("Cannot allocate vmw_connector_state\n"); 794 return; 795 } 796 797 __drm_atomic_helper_connector_reset(connector, &vcs->base); 798 } 799 800 801 /** 802 * vmw_du_connector_destroy_state - destroy connector state 803 * @connector: DRM connector 804 * @state: state object to destroy 805 * 806 * Destroys the connector state (both common and vmw-specific) for the 807 * specified connector. 808 */ 809 void 810 vmw_du_connector_destroy_state(struct drm_connector *connector, 811 struct drm_connector_state *state) 812 { 813 drm_atomic_helper_connector_destroy_state(connector, state); 814 } 815 /* 816 * Generic framebuffer code 817 */ 818 819 /* 820 * Surface framebuffer code 821 */ 822 823 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 824 { 825 struct vmw_framebuffer_surface *vfbs = 826 vmw_framebuffer_to_vfbs(framebuffer); 827 828 drm_framebuffer_cleanup(framebuffer); 829 vmw_surface_unreference(&vfbs->surface); 830 if (vfbs->base.user_obj) 831 ttm_base_object_unref(&vfbs->base.user_obj); 832 833 kfree(vfbs); 834 } 835 836 /** 837 * vmw_kms_readback - Perform a readback from the screen system to 838 * a buffer-object backed framebuffer. 839 * 840 * @dev_priv: Pointer to the device private structure. 841 * @file_priv: Pointer to a struct drm_file identifying the caller. 842 * Must be set to NULL if @user_fence_rep is NULL. 843 * @vfb: Pointer to the buffer-object backed framebuffer. 844 * @user_fence_rep: User-space provided structure for fence information. 845 * Must be set to non-NULL if @file_priv is non-NULL. 846 * @vclips: Array of clip rects. 847 * @num_clips: Number of clip rects in @vclips. 848 * 849 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if 850 * interrupted.
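 *
 * As a hypothetical usage sketch (not taken from this driver), a
 * kernel-internal caller with no user-space fence could read back the
 * whole framebuffer with a single clip rect covering it; @vfb and
 * @dev_priv are assumed to be already set up:
 *
 *	struct drm_vmw_rect clip = {
 *		.x = 0, .y = 0,
 *		.w = vfb->base.width, .h = vfb->base.height,
 *	};
 *	int ret = vmw_kms_readback(dev_priv, NULL, vfb, NULL, &clip, 1);
 *
 * Per the rules above, @file_priv and @user_fence_rep are then both
 * NULL.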
851 */ 852 int vmw_kms_readback(struct vmw_private *dev_priv, 853 struct drm_file *file_priv, 854 struct vmw_framebuffer *vfb, 855 struct drm_vmw_fence_rep __user *user_fence_rep, 856 struct drm_vmw_rect *vclips, 857 uint32_t num_clips) 858 { 859 switch (dev_priv->active_display_unit) { 860 case vmw_du_screen_object: 861 return vmw_kms_sou_readback(dev_priv, file_priv, vfb, 862 user_fence_rep, vclips, num_clips, 863 NULL); 864 case vmw_du_screen_target: 865 return vmw_kms_stdu_dma(dev_priv, file_priv, vfb, 866 user_fence_rep, NULL, vclips, num_clips, 867 1, false, true, NULL); 868 default: 869 WARN_ONCE(true, 870 "Readback called with invalid display system.\n"); 871 } 872 873 return -ENOSYS; 874 } 875 876 877 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 878 .destroy = vmw_framebuffer_surface_destroy, 879 .dirty = drm_atomic_helper_dirtyfb, 880 }; 881 882 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 883 struct vmw_surface *surface, 884 struct vmw_framebuffer **out, 885 const struct drm_mode_fb_cmd2 886 *mode_cmd, 887 bool is_bo_proxy) 888 889 { 890 struct drm_device *dev = &dev_priv->drm; 891 struct vmw_framebuffer_surface *vfbs; 892 enum SVGA3dSurfaceFormat format; 893 int ret; 894 struct drm_format_name_buf format_name; 895 896 /* 3D is only supported on HWv8 and newer hosts */ 897 if (dev_priv->active_display_unit == vmw_du_legacy) 898 return -ENOSYS; 899 900 /* 901 * Sanity checks. 902 */ 903 904 /* Surface must be marked as a scanout. */ 905 if (unlikely(!surface->metadata.scanout)) 906 return -EINVAL; 907 908 if (unlikely(surface->metadata.mip_levels[0] != 1 || 909 surface->metadata.num_sizes != 1 || 910 surface->metadata.base_size.width < mode_cmd->width || 911 surface->metadata.base_size.height < mode_cmd->height || 912 surface->metadata.base_size.depth != 1)) { 913 DRM_ERROR("Incompatible surface dimensions " 914 "for requested mode.\n"); 915 return -EINVAL; 916 } 917 918 switch (mode_cmd->pixel_format) { 919 case DRM_FORMAT_ARGB8888: 920 format = SVGA3D_A8R8G8B8; 921 break; 922 case DRM_FORMAT_XRGB8888: 923 format = SVGA3D_X8R8G8B8; 924 break; 925 case DRM_FORMAT_RGB565: 926 format = SVGA3D_R5G6B5; 927 break; 928 case DRM_FORMAT_XRGB1555: 929 format = SVGA3D_A1R5G5B5; 930 break; 931 default: 932 DRM_ERROR("Invalid pixel format: %s\n", 933 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 934 return -EINVAL; 935 } 936 937 /* 938 * For DX, surface format validation is done when surface->scanout 939 * is set. 
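 * Without a DX/SM4 context the device presumably has no format
 * conversion path, hence the exact-match check right below.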
940 */ 941 if (!has_sm4_context(dev_priv) && format != surface->metadata.format) { 942 DRM_ERROR("Invalid surface format for requested mode.\n"); 943 return -EINVAL; 944 } 945 946 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); 947 if (!vfbs) { 948 ret = -ENOMEM; 949 goto out_err1; 950 } 951 952 drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); 953 vfbs->surface = vmw_surface_reference(surface); 954 vfbs->base.user_handle = mode_cmd->handles[0]; 955 vfbs->is_bo_proxy = is_bo_proxy; 956 957 *out = &vfbs->base; 958 959 ret = drm_framebuffer_init(dev, &vfbs->base.base, 960 &vmw_framebuffer_surface_funcs); 961 if (ret) 962 goto out_err2; 963 964 return 0; 965 966 out_err2: 967 vmw_surface_unreference(&surface); 968 kfree(vfbs); 969 out_err1: 970 return ret; 971 } 972 973 /* 974 * Buffer-object framebuffer code 975 */ 976 977 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) 978 { 979 struct vmw_framebuffer_bo *vfbd = 980 vmw_framebuffer_to_vfbd(framebuffer); 981 982 drm_framebuffer_cleanup(framebuffer); 983 vmw_bo_unreference(&vfbd->buffer); 984 if (vfbd->base.user_obj) 985 ttm_base_object_unref(&vfbd->base.user_obj); 986 987 kfree(vfbd); 988 } 989 990 static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer, 991 struct drm_file *file_priv, 992 unsigned int flags, unsigned int color, 993 struct drm_clip_rect *clips, 994 unsigned int num_clips) 995 { 996 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 997 struct vmw_framebuffer_bo *vfbd = 998 vmw_framebuffer_to_vfbd(framebuffer); 999 struct drm_clip_rect norect; 1000 int ret, increment = 1; 1001 1002 drm_modeset_lock_all(&dev_priv->drm); 1003 1004 ret = ttm_read_lock(&dev_priv->reservation_sem, true); 1005 if (unlikely(ret != 0)) { 1006 drm_modeset_unlock_all(&dev_priv->drm); 1007 return ret; 1008 } 1009 1010 if (!num_clips) { 1011 num_clips = 1; 1012 clips = &norect; 1013 norect.x1 = norect.y1 = 0; 1014 norect.x2 = framebuffer->width; 1015 norect.y2 = framebuffer->height; 1016 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { 1017 num_clips /= 2; 1018 increment = 2; 1019 } 1020 1021 switch (dev_priv->active_display_unit) { 1022 case vmw_du_legacy: 1023 ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0, 1024 clips, num_clips, increment); 1025 break; 1026 default: 1027 ret = -EINVAL; 1028 WARN_ONCE(true, "Dirty called with invalid display system.\n"); 1029 break; 1030 } 1031 1032 vmw_cmd_flush(dev_priv, false); 1033 ttm_read_unlock(&dev_priv->reservation_sem); 1034 1035 drm_modeset_unlock_all(&dev_priv->drm); 1036 1037 return ret; 1038 } 1039 1040 static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer, 1041 struct drm_file *file_priv, 1042 unsigned int flags, unsigned int color, 1043 struct drm_clip_rect *clips, 1044 unsigned int num_clips) 1045 { 1046 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 1047 1048 if (dev_priv->active_display_unit == vmw_du_legacy) 1049 return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags, 1050 color, clips, num_clips); 1051 1052 return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color, 1053 clips, num_clips); 1054 } 1055 1056 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { 1057 .destroy = vmw_framebuffer_bo_destroy, 1058 .dirty = vmw_framebuffer_bo_dirty_ext, 1059 }; 1060 1061 /** 1062 * Pin the buffer in a location suitable for access by the 1063 * display system.
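 *
 * In short, mirroring the switch below: legacy display units pin at the
 * start of VRAM; buffer-backed screen objects/targets pin in VRAM-or-GMR
 * when the host has 3D (surface DMA) and in system memory otherwise
 * (CPU blit); surface-backed framebuffers pin their backing store in a
 * MOB.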
1064 */ 1065 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb) 1066 { 1067 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 1068 struct vmw_buffer_object *buf; 1069 struct ttm_placement *placement; 1070 int ret; 1071 1072 buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 1073 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; 1074 1075 if (!buf) 1076 return 0; 1077 1078 switch (dev_priv->active_display_unit) { 1079 case vmw_du_legacy: 1080 vmw_overlay_pause_all(dev_priv); 1081 ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false); 1082 vmw_overlay_resume_all(dev_priv); 1083 break; 1084 case vmw_du_screen_object: 1085 case vmw_du_screen_target: 1086 if (vfb->bo) { 1087 if (dev_priv->capabilities & SVGA_CAP_3D) { 1088 /* 1089 * Use surface DMA to get content to 1090 * screen target surface. 1091 */ 1092 placement = &vmw_vram_gmr_placement; 1093 } else { 1094 /* Use CPU blit. */ 1095 placement = &vmw_sys_placement; 1096 } 1097 } else { 1098 /* Use surface / image update */ 1099 placement = &vmw_mob_placement; 1100 } 1101 1102 return vmw_bo_pin_in_placement(dev_priv, buf, placement, false); 1103 default: 1104 return -EINVAL; 1105 } 1106 1107 return ret; 1108 } 1109 1110 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb) 1111 { 1112 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 1113 struct vmw_buffer_object *buf; 1114 1115 buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer : 1116 vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup; 1117 1118 if (WARN_ON(!buf)) 1119 return 0; 1120 1121 return vmw_bo_unpin(dev_priv, buf, false); 1122 } 1123 1124 /** 1125 * vmw_create_bo_proxy - create a proxy surface for the buffer object 1126 * 1127 * @dev: DRM device 1128 * @mode_cmd: parameters for the new surface 1129 * @bo_mob: MOB backing the buffer object 1130 * @srf_out: newly created surface 1131 * 1132 * When the content FB is a buffer object, we create a surface as a proxy to the 1133 * same buffer. This way we can do a surface copy rather than a surface DMA.
1134 * This is a more efficient approach. 1135 * 1136 * RETURNS: 1137 * 0 on success, error code otherwise 1138 */ 1139 static int vmw_create_bo_proxy(struct drm_device *dev, 1140 const struct drm_mode_fb_cmd2 *mode_cmd, 1141 struct vmw_buffer_object *bo_mob, 1142 struct vmw_surface **srf_out) 1143 { 1144 struct vmw_surface_metadata metadata = {0}; 1145 uint32_t format; 1146 struct vmw_resource *res; 1147 unsigned int bytes_pp; 1148 struct drm_format_name_buf format_name; 1149 int ret; 1150 1151 switch (mode_cmd->pixel_format) { 1152 case DRM_FORMAT_ARGB8888: 1153 case DRM_FORMAT_XRGB8888: 1154 format = SVGA3D_X8R8G8B8; 1155 bytes_pp = 4; 1156 break; 1157 1158 case DRM_FORMAT_RGB565: 1159 case DRM_FORMAT_XRGB1555: 1160 format = SVGA3D_R5G6B5; 1161 bytes_pp = 2; 1162 break; 1163 1164 case DRM_FORMAT_C8: 1165 format = SVGA3D_P8; 1166 bytes_pp = 1; 1167 break; 1168 1169 default: 1170 DRM_ERROR("Invalid framebuffer format %s\n", 1171 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 1172 return -EINVAL; 1173 } 1174 1175 metadata.format = format; 1176 metadata.mip_levels[0] = 1; 1177 metadata.num_sizes = 1; 1178 metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp; 1179 metadata.base_size.height = mode_cmd->height; 1180 metadata.base_size.depth = 1; 1181 metadata.scanout = true; 1182 1183 ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out); 1184 if (ret) { 1185 DRM_ERROR("Failed to allocate proxy content buffer\n"); 1186 return ret; 1187 } 1188 1189 res = &(*srf_out)->res; 1190 1191 /* Reserve and switch the backing mob. */ 1192 mutex_lock(&res->dev_priv->cmdbuf_mutex); 1193 (void) vmw_resource_reserve(res, false, true); 1194 vmw_bo_unreference(&res->backup); 1195 res->backup = vmw_bo_reference(bo_mob); 1196 res->backup_offset = 0; 1197 vmw_resource_unreserve(res, false, false, false, NULL, 0); 1198 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1199 1200 return 0; 1201 } 1202 1203 1204 1205 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, 1206 struct vmw_buffer_object *bo, 1207 struct vmw_framebuffer **out, 1208 const struct drm_mode_fb_cmd2 1209 *mode_cmd) 1210 1211 { 1212 struct drm_device *dev = &dev_priv->drm; 1213 struct vmw_framebuffer_bo *vfbd; 1214 unsigned int requested_size; 1215 struct drm_format_name_buf format_name; 1216 int ret; 1217 1218 requested_size = mode_cmd->height * mode_cmd->pitches[0]; 1219 if (unlikely(requested_size > bo->base.base.size)) { 1220 DRM_ERROR("Screen buffer object size is too small " 1221 "for requested mode.\n"); 1222 return -EINVAL; 1223 } 1224 1225 /* Limited framebuffer color depth support for screen objects */ 1226 if (dev_priv->active_display_unit == vmw_du_screen_object) { 1227 switch (mode_cmd->pixel_format) { 1228 case DRM_FORMAT_XRGB8888: 1229 case DRM_FORMAT_ARGB8888: 1230 break; 1231 case DRM_FORMAT_XRGB1555: 1232 case DRM_FORMAT_RGB565: 1233 break; 1234 default: 1235 DRM_ERROR("Invalid pixel format: %s\n", 1236 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 1237 return -EINVAL; 1238 } 1239 } 1240 1241 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); 1242 if (!vfbd) { 1243 ret = -ENOMEM; 1244 goto out_err1; 1245 } 1246 1247 drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); 1248 vfbd->base.bo = true; 1249 vfbd->buffer = vmw_bo_reference(bo); 1250 vfbd->base.user_handle = mode_cmd->handles[0]; 1251 *out = &vfbd->base; 1252 1253 ret = drm_framebuffer_init(dev, &vfbd->base.base, 1254 &vmw_framebuffer_bo_funcs); 1255 if (ret) 1256 goto out_err2; 1257 1258 return 0; 1259 1260 out_err2: 1261
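/* Balances the vmw_bo_reference() taken for vfbd->buffer above;
 * the caller still holds its own reference to @bo. */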
vmw_bo_unreference(&bo); 1262 kfree(vfbd); 1263 out_err1: 1264 return ret; 1265 } 1266 1267 1268 /** 1269 * vmw_kms_srf_ok - check if a surface can be created 1270 * @dev_priv: Pointer to device private struct. 1271 * @width: requested width 1272 * @height: requested height 1273 * 1274 * Surfaces need to be smaller than the maximum texture size. 1275 */ 1276 static bool 1277 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height) 1278 { 1279 if (width > dev_priv->texture_max_width || 1280 height > dev_priv->texture_max_height) 1281 return false; 1282 1283 return true; 1284 } 1285 1286 /** 1287 * vmw_kms_new_framebuffer - Create a new framebuffer. 1288 * 1289 * @dev_priv: Pointer to device private struct. 1290 * @bo: Pointer to buffer object to wrap the kms framebuffer around. 1291 * Either @bo or @surface must be NULL. 1292 * @surface: Pointer to a surface to wrap the kms framebuffer around. 1293 * Either @bo or @surface must be NULL. 1294 * @only_2d: No presents will occur to this buffer object based framebuffer. 1295 * This helps the code to do some important optimizations. 1296 * @mode_cmd: Frame-buffer metadata. 1297 */ 1298 struct vmw_framebuffer * 1299 vmw_kms_new_framebuffer(struct vmw_private *dev_priv, 1300 struct vmw_buffer_object *bo, 1301 struct vmw_surface *surface, 1302 bool only_2d, 1303 const struct drm_mode_fb_cmd2 *mode_cmd) 1304 { 1305 struct vmw_framebuffer *vfb = NULL; 1306 bool is_bo_proxy = false; 1307 int ret; 1308 1309 /* 1310 * We cannot use the SurfaceDMA command in a non-accelerated VM, 1311 * therefore, wrap the buffer object in a surface so we can use the 1312 * SurfaceCopy command. 1313 */ 1314 if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) && 1315 bo && only_2d && 1316 mode_cmd->width > 64 && /* Don't create a proxy for cursor */ 1317 dev_priv->active_display_unit == vmw_du_screen_target) { 1318 ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd, 1319 bo, &surface); 1320 if (ret) 1321 return ERR_PTR(ret); 1322 1323 is_bo_proxy = true; 1324 } 1325 1326 /* Create the new framebuffer depending on what we have */ 1327 if (surface) { 1328 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, 1329 mode_cmd, 1330 is_bo_proxy); 1331 1332 /* 1333 * vmw_create_bo_proxy() adds a reference that is no longer 1334 * needed 1335 */ 1336 if (is_bo_proxy) 1337 vmw_surface_unreference(&surface); 1338 } else if (bo) { 1339 ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb, 1340 mode_cmd); 1341 } else { 1342 BUG(); 1343 } 1344 1345 if (ret) 1346 return ERR_PTR(ret); 1347 1348 vfb->pin = vmw_framebuffer_pin; 1349 vfb->unpin = vmw_framebuffer_unpin; 1350 1351 return vfb; 1352 } 1353 1354 /* 1355 * Generic Kernel modesetting functions 1356 */ 1357 1358 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, 1359 struct drm_file *file_priv, 1360 const struct drm_mode_fb_cmd2 *mode_cmd) 1361 { 1362 struct vmw_private *dev_priv = vmw_priv(dev); 1363 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1364 struct vmw_framebuffer *vfb = NULL; 1365 struct vmw_surface *surface = NULL; 1366 struct vmw_buffer_object *bo = NULL; 1367 struct ttm_base_object *user_obj; 1368 int ret; 1369 1370 /* 1371 * Take a reference on the user object of the resource 1372 * backing the kms fb. This ensures that user-space handle 1373 * lookups on that resource will always work as long as 1374 * it's registered with a kms framebuffer. This is important, 1375 * since vmw_execbuf_process identifies resources in the 1376 * command stream using user-space handles.
1377 */ 1378 1379 user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]); 1380 if (unlikely(user_obj == NULL)) { 1381 DRM_ERROR("Could not locate requested kms frame buffer.\n"); 1382 return ERR_PTR(-ENOENT); 1383 } 1384 1388 1389 /* returns either a bo or surface */ 1390 ret = vmw_user_lookup_handle(dev_priv, tfile, 1391 mode_cmd->handles[0], 1392 &surface, &bo); 1393 if (ret) 1394 goto err_out; 1395 1396 1397 if (!bo && 1398 !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) { 1399 DRM_ERROR("Surface size cannot exceed %dx%d\n", 1400 dev_priv->texture_max_width, 1401 dev_priv->texture_max_height); 1402 ret = -EINVAL; goto err_out; 1403 } 1404 1405 1406 vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface, 1407 !(dev_priv->capabilities & SVGA_CAP_3D), 1408 mode_cmd); 1409 if (IS_ERR(vfb)) { 1410 ret = PTR_ERR(vfb); 1411 goto err_out; 1412 } 1413 1414 err_out: 1415 /* vmw_user_lookup_handle takes one ref so does new_fb */ 1416 if (bo) 1417 vmw_bo_unreference(&bo); 1418 if (surface) 1419 vmw_surface_unreference(&surface); 1420 1421 if (ret) { 1422 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); 1423 ttm_base_object_unref(&user_obj); 1424 return ERR_PTR(ret); 1425 } else 1426 vfb->user_obj = user_obj; 1427 1428 return &vfb->base; 1429 } 1430 1431 /** 1432 * vmw_kms_check_display_memory - Validates display memory required for a 1433 * topology 1434 * @dev: DRM device 1435 * @num_rects: number of drm_rect in rects 1436 * @rects: array of drm_rect representing the topology to validate indexed by 1437 * crtc index. 1438 * 1439 * Returns: 1440 * 0 on success otherwise negative error code 1441 */ 1442 static int vmw_kms_check_display_memory(struct drm_device *dev, 1443 uint32_t num_rects, 1444 struct drm_rect *rects) 1445 { 1446 struct vmw_private *dev_priv = vmw_priv(dev); 1447 struct drm_rect bounding_box = {0}; 1448 u64 total_pixels = 0, pixel_mem, bb_mem; 1449 int i; 1450 1451 for (i = 0; i < num_rects; i++) { 1452 /* 1453 * For STDU, only the individual screen (screen target) size is 1454 * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers. 1455 */ 1456 if (dev_priv->active_display_unit == vmw_du_screen_target && 1457 (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || 1458 drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { 1459 VMW_DEBUG_KMS("Screen size not supported.\n"); 1460 return -EINVAL; 1461 } 1462 1463 /* Bounding box upper left is at (0,0). */ 1464 if (rects[i].x2 > bounding_box.x2) 1465 bounding_box.x2 = rects[i].x2; 1466 1467 if (rects[i].y2 > bounding_box.y2) 1468 bounding_box.y2 = rects[i].y2; 1469 1470 total_pixels += (u64) drm_rect_width(&rects[i]) * 1471 (u64) drm_rect_height(&rects[i]); 1472 } 1473 1474 /* Virtual svga device primary limits are always in 32-bpp. */ 1475 pixel_mem = total_pixels * 4; 1476 1477 /* 1478 * For HV10 and below, prim_bb_mem is the VRAM size. When 1479 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size 1480 * is the limit on the primary bounding box. 1481 */ 1482 if (pixel_mem > dev_priv->prim_bb_mem) { 1483 VMW_DEBUG_KMS("Combined output size too large.\n"); 1484 return -EINVAL; 1485 } 1486 1487 /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only.
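 * As a worked example of the checks above (our own illustration): two
 * 1920x1080 outputs side by side give total_pixels = 2 * 1920 * 1080,
 * so pixel_mem = 4 * total_pixels is about 15.8 MiB; the bounding box
 * is 3840x1080, so bb_mem = 3840 * 1080 * 4 is the same ~15.8 MiB, and
 * both values must fit in prim_bb_mem.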
*/ 1488 if (dev_priv->active_display_unit != vmw_du_screen_target || 1489 !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) { 1490 bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4; 1491 1492 if (bb_mem > dev_priv->prim_bb_mem) { 1493 VMW_DEBUG_KMS("Topology is beyond supported limits.\n"); 1494 return -EINVAL; 1495 } 1496 } 1497 1498 return 0; 1499 } 1500 1501 /** 1502 * vmw_crtc_state_and_lock - Return new or current crtc state with locked 1503 * crtc mutex 1504 * @state: The atomic state pointer containing the new atomic state 1505 * @crtc: The crtc 1506 * 1507 * This function returns the new crtc state if it's part of the state update. 1508 * Otherwise returns the current crtc state. It also makes sure that the 1509 * crtc mutex is locked. 1510 * 1511 * Returns: A valid crtc state pointer or NULL. It may also return a 1512 * pointer error, in particular -EDEADLK if locking needs to be rerun. 1513 */ 1514 static struct drm_crtc_state * 1515 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc) 1516 { 1517 struct drm_crtc_state *crtc_state; 1518 1519 crtc_state = drm_atomic_get_new_crtc_state(state, crtc); 1520 if (crtc_state) { 1521 lockdep_assert_held(&crtc->mutex.mutex.base); 1522 } else { 1523 int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx); 1524 1525 if (ret != 0 && ret != -EALREADY) 1526 return ERR_PTR(ret); 1527 1528 crtc_state = crtc->state; 1529 } 1530 1531 return crtc_state; 1532 } 1533 1534 /** 1535 * vmw_kms_check_implicit - Verify that all implicit display units scan out 1536 * from the same fb after the new state is committed. 1537 * @dev: The drm_device. 1538 * @state: The new state to be checked. 1539 * 1540 * Returns: 1541 * Zero on success, 1542 * -EINVAL on invalid state, 1543 * -EDEADLK if modeset locking needs to be rerun. 1544 */ 1545 static int vmw_kms_check_implicit(struct drm_device *dev, 1546 struct drm_atomic_state *state) 1547 { 1548 struct drm_framebuffer *implicit_fb = NULL; 1549 struct drm_crtc *crtc; 1550 struct drm_crtc_state *crtc_state; 1551 struct drm_plane_state *plane_state; 1552 1553 drm_for_each_crtc(crtc, dev) { 1554 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 1555 1556 if (!du->is_implicit) 1557 continue; 1558 1559 crtc_state = vmw_crtc_state_and_lock(state, crtc); 1560 if (IS_ERR(crtc_state)) 1561 return PTR_ERR(crtc_state); 1562 1563 if (!crtc_state || !crtc_state->enable) 1564 continue; 1565 1566 /* 1567 * Can't move primary planes across crtcs, so this is OK. 1568 * It also means we don't need to take the plane mutex. 
1569 */ 1570 plane_state = du->primary.state; 1571 if (plane_state->crtc != crtc) 1572 continue; 1573 1574 if (!implicit_fb) 1575 implicit_fb = plane_state->fb; 1576 else if (implicit_fb != plane_state->fb) 1577 return -EINVAL; 1578 } 1579 1580 return 0; 1581 } 1582 1583 /** 1584 * vmw_kms_check_topology - Validates topology in drm_atomic_state 1585 * @dev: DRM device 1586 * @state: the driver state object 1587 * 1588 * Returns: 1589 * 0 on success otherwise negative error code 1590 */ 1591 static int vmw_kms_check_topology(struct drm_device *dev, 1592 struct drm_atomic_state *state) 1593 { 1594 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 1595 struct drm_rect *rects; 1596 struct drm_crtc *crtc; 1597 uint32_t i; 1598 int ret = 0; 1599 1600 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect), 1601 GFP_KERNEL); 1602 if (!rects) 1603 return -ENOMEM; 1604 1605 drm_for_each_crtc(crtc, dev) { 1606 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 1607 struct drm_crtc_state *crtc_state; 1608 1609 i = drm_crtc_index(crtc); 1610 1611 crtc_state = vmw_crtc_state_and_lock(state, crtc); 1612 if (IS_ERR(crtc_state)) { 1613 ret = PTR_ERR(crtc_state); 1614 goto clean; 1615 } 1616 1617 if (!crtc_state) 1618 continue; 1619 1620 if (crtc_state->enable) { 1621 rects[i].x1 = du->gui_x; 1622 rects[i].y1 = du->gui_y; 1623 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay; 1624 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay; 1625 } else { 1626 rects[i].x1 = 0; 1627 rects[i].y1 = 0; 1628 rects[i].x2 = 0; 1629 rects[i].y2 = 0; 1630 } 1631 } 1632 1633 /* Determine change to topology due to new atomic state */ 1634 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 1635 new_crtc_state, i) { 1636 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 1637 struct drm_connector *connector; 1638 struct drm_connector_state *conn_state; 1639 struct vmw_connector_state *vmw_conn_state; 1640 1641 if (!du->pref_active && new_crtc_state->enable) { 1642 VMW_DEBUG_KMS("Enabling a disabled display unit\n"); 1643 ret = -EINVAL; 1644 goto clean; 1645 } 1646 1647 /* 1648 * For vmwgfx each crtc has only one connector attached and it 1649 * is not changed so don't really need to check the 1650 * crtc->connector_mask and iterate over it. 
1651 */ 1652 connector = &du->connector; 1653 conn_state = drm_atomic_get_connector_state(state, connector); 1654 if (IS_ERR(conn_state)) { 1655 ret = PTR_ERR(conn_state); 1656 goto clean; 1657 } 1658 1659 vmw_conn_state = vmw_connector_state_to_vcs(conn_state); 1660 vmw_conn_state->gui_x = du->gui_x; 1661 vmw_conn_state->gui_y = du->gui_y; 1662 } 1663 1664 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc, 1665 rects); 1666 1667 clean: 1668 kfree(rects); 1669 return ret; 1670 } 1671 1672 /** 1673 * vmw_kms_atomic_check_modeset - validate state object for modeset changes 1674 * 1675 * @dev: DRM device 1676 * @state: the driver state object 1677 * 1678 * This is a simple wrapper around drm_atomic_helper_check() for 1679 * us to assign a value to mode->crtc_clock so that 1680 * drm_calc_timestamping_constants() won't print an error message. 1681 * 1682 * Returns: 1683 * Zero for success or -errno 1684 */ 1685 static int 1686 vmw_kms_atomic_check_modeset(struct drm_device *dev, 1687 struct drm_atomic_state *state) 1688 { 1689 struct drm_crtc *crtc; 1690 struct drm_crtc_state *crtc_state; 1691 bool need_modeset = false; 1692 int i, ret; 1693 1694 ret = drm_atomic_helper_check(dev, state); 1695 if (ret) 1696 return ret; 1697 1698 ret = vmw_kms_check_implicit(dev, state); 1699 if (ret) { 1700 VMW_DEBUG_KMS("Invalid implicit state\n"); 1701 return ret; 1702 } 1703 1704 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1705 if (drm_atomic_crtc_needs_modeset(crtc_state)) 1706 need_modeset = true; 1707 } 1708 1709 if (need_modeset) 1710 return vmw_kms_check_topology(dev, state); 1711 1712 return ret; 1713 } 1714 1715 static const struct drm_mode_config_funcs vmw_kms_funcs = { 1716 .fb_create = vmw_kms_fb_create, 1717 .atomic_check = vmw_kms_atomic_check_modeset, 1718 .atomic_commit = drm_atomic_helper_commit, 1719 }; 1720 1721 static int vmw_kms_generic_present(struct vmw_private *dev_priv, 1722 struct drm_file *file_priv, 1723 struct vmw_framebuffer *vfb, 1724 struct vmw_surface *surface, 1725 uint32_t sid, 1726 int32_t destX, int32_t destY, 1727 struct drm_vmw_rect *clips, 1728 uint32_t num_clips) 1729 { 1730 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips, 1731 &surface->res, destX, destY, 1732 num_clips, 1, NULL, NULL); 1733 } 1734 1735 1736 int vmw_kms_present(struct vmw_private *dev_priv, 1737 struct drm_file *file_priv, 1738 struct vmw_framebuffer *vfb, 1739 struct vmw_surface *surface, 1740 uint32_t sid, 1741 int32_t destX, int32_t destY, 1742 struct drm_vmw_rect *clips, 1743 uint32_t num_clips) 1744 { 1745 int ret; 1746 1747 switch (dev_priv->active_display_unit) { 1748 case vmw_du_screen_target: 1749 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips, 1750 &surface->res, destX, destY, 1751 num_clips, 1, NULL, NULL); 1752 break; 1753 case vmw_du_screen_object: 1754 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface, 1755 sid, destX, destY, clips, 1756 num_clips); 1757 break; 1758 default: 1759 WARN_ONCE(true, 1760 "Present called with invalid display system.\n"); 1761 ret = -ENOSYS; 1762 break; 1763 } 1764 if (ret) 1765 return ret; 1766 1767 vmw_cmd_flush(dev_priv, false); 1768 1769 return 0; 1770 } 1771 1772 static void 1773 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) 1774 { 1775 if (dev_priv->hotplug_mode_update_property) 1776 return; 1777 1778 dev_priv->hotplug_mode_update_property = 1779 drm_property_create_range(&dev_priv->drm, 1780 DRM_MODE_PROP_IMMUTABLE, 1781 "hotplug_mode_update", 0, 1);
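/* Presumably read by user-space guest tools to learn that preferred
 * modes change on hotplug; creating the property is best-effort, as
 * the check below shows. */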
1782 1783 if (!dev_priv->hotplug_mode_update_property) 1784 return; 1785 1786 } 1787 1788 int vmw_kms_init(struct vmw_private *dev_priv) 1789 { 1790 struct drm_device *dev = &dev_priv->drm; 1791 int ret; 1792 1793 drm_mode_config_init(dev); 1794 dev->mode_config.funcs = &vmw_kms_funcs; 1795 dev->mode_config.min_width = 1; 1796 dev->mode_config.min_height = 1; 1797 dev->mode_config.max_width = dev_priv->texture_max_width; 1798 dev->mode_config.max_height = dev_priv->texture_max_height; 1799 1800 drm_mode_create_suggested_offset_properties(dev); 1801 vmw_kms_create_hotplug_mode_update_property(dev_priv); 1802 1803 ret = vmw_kms_stdu_init_display(dev_priv); 1804 if (ret) { 1805 ret = vmw_kms_sou_init_display(dev_priv); 1806 if (ret) /* Fallback */ 1807 ret = vmw_kms_ldu_init_display(dev_priv); 1808 } 1809 1810 return ret; 1811 } 1812 1813 int vmw_kms_close(struct vmw_private *dev_priv) 1814 { 1815 int ret = 0; 1816 1817 /* 1818 * Docs say we should take the lock before calling this function, 1819 * but since it destroys encoders and our destructor calls 1820 * drm_encoder_cleanup, which takes the lock, we would deadlock. 1821 */ 1822 drm_mode_config_cleanup(&dev_priv->drm); 1823 if (dev_priv->active_display_unit == vmw_du_legacy) 1824 ret = vmw_kms_ldu_close_display(dev_priv); 1825 1826 return ret; 1827 } 1828 1829 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, 1830 struct drm_file *file_priv) 1831 { 1832 struct drm_vmw_cursor_bypass_arg *arg = data; 1833 struct vmw_display_unit *du; 1834 struct drm_crtc *crtc; 1835 int ret = 0; 1836 1837 1838 mutex_lock(&dev->mode_config.mutex); 1839 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { 1840 1841 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1842 du = vmw_crtc_to_du(crtc); 1843 du->hotspot_x = arg->xhot; 1844 du->hotspot_y = arg->yhot; 1845 } 1846 1847 mutex_unlock(&dev->mode_config.mutex); 1848 return 0; 1849 } 1850 1851 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id); 1852 if (!crtc) { 1853 ret = -ENOENT; 1854 goto out; 1855 } 1856 1857 du = vmw_crtc_to_du(crtc); 1858 1859 du->hotspot_x = arg->xhot; 1860 du->hotspot_y = arg->yhot; 1861 1862 out: 1863 mutex_unlock(&dev->mode_config.mutex); 1864 1865 return ret; 1866 } 1867 1868 int vmw_kms_write_svga(struct vmw_private *vmw_priv, 1869 unsigned width, unsigned height, unsigned pitch, 1870 unsigned bpp, unsigned depth) 1871 { 1872 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) 1873 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); 1874 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1875 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch); 1876 vmw_write(vmw_priv, SVGA_REG_WIDTH, width); 1877 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); 1878 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0) 1879 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); 1880 1881 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) { 1882 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n", 1883 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH)); 1884 return -EINVAL; 1885 } 1886 1887 return 0; 1888 } 1889 1890 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, 1891 uint32_t pitch, 1892 uint32_t height) 1893 { 1894 return ((u64) pitch * (u64) height) < (u64) 1895 ((dev_priv->active_display_unit == vmw_du_screen_target) ? 1896 dev_priv->prim_bb_mem : dev_priv->vram_size); 1897 } 1898 1899 1900 /** 1901 * Function called by DRM code with vbl_lock held.
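 * The virtual device generates no real scanout vblanks, so the counter
 * below never advances.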
1902 */ 1903 u32 vmw_get_vblank_counter(struct drm_crtc *crtc) 1904 { 1905 return 0; 1906 } 1907 1908 /** 1909 * Function called by DRM code with vbl_lock held. 1910 */ 1911 int vmw_enable_vblank(struct drm_crtc *crtc) 1912 { 1913 return -EINVAL; 1914 } 1915 1916 /** 1917 * Function called by DRM code with vbl_lock held. 1918 */ 1919 void vmw_disable_vblank(struct drm_crtc *crtc) 1920 { 1921 } 1922 1923 /** 1924 * vmw_du_update_layout - Update the display unit with topology from resolution 1925 * plugin and generate DRM uevent 1926 * @dev_priv: device private 1927 * @num_rects: number of drm_rect in rects 1928 * @rects: topology to update 1929 */ 1930 static int vmw_du_update_layout(struct vmw_private *dev_priv, 1931 unsigned int num_rects, struct drm_rect *rects) 1932 { 1933 struct drm_device *dev = &dev_priv->drm; 1934 struct vmw_display_unit *du; 1935 struct drm_connector *con; 1936 struct drm_connector_list_iter conn_iter; 1937 struct drm_modeset_acquire_ctx ctx; 1938 struct drm_crtc *crtc; 1939 int ret; 1940 1941 /* Currently gui_x/y is protected with the crtc mutex */ 1942 mutex_lock(&dev->mode_config.mutex); 1943 drm_modeset_acquire_init(&ctx, 0); 1944 retry: 1945 drm_for_each_crtc(crtc, dev) { 1946 ret = drm_modeset_lock(&crtc->mutex, &ctx); 1947 if (ret < 0) { 1948 if (ret == -EDEADLK) { 1949 drm_modeset_backoff(&ctx); 1950 goto retry; 1951 } 1952 goto out_fini; 1953 } 1954 } 1955 1956 drm_connector_list_iter_begin(dev, &conn_iter); 1957 drm_for_each_connector_iter(con, &conn_iter) { 1958 du = vmw_connector_to_du(con); 1959 if (num_rects > du->unit) { 1960 du->pref_width = drm_rect_width(&rects[du->unit]); 1961 du->pref_height = drm_rect_height(&rects[du->unit]); 1962 du->pref_active = true; 1963 du->gui_x = rects[du->unit].x1; 1964 du->gui_y = rects[du->unit].y1; 1965 } else { 1966 du->pref_width = 800; 1967 du->pref_height = 600; 1968 du->pref_active = false; 1969 du->gui_x = 0; 1970 du->gui_y = 0; 1971 } 1972 } 1973 drm_connector_list_iter_end(&conn_iter); 1974 1975 list_for_each_entry(con, &dev->mode_config.connector_list, head) { 1976 du = vmw_connector_to_du(con); 1977 if (num_rects > du->unit) { 1978 drm_object_property_set_value 1979 (&con->base, dev->mode_config.suggested_x_property, 1980 du->gui_x); 1981 drm_object_property_set_value 1982 (&con->base, dev->mode_config.suggested_y_property, 1983 du->gui_y); 1984 } else { 1985 drm_object_property_set_value 1986 (&con->base, dev->mode_config.suggested_x_property, 1987 0); 1988 drm_object_property_set_value 1989 (&con->base, dev->mode_config.suggested_y_property, 1990 0); 1991 } 1992 con->status = vmw_du_connector_detect(con, true); 1993 } 1994 1995 drm_sysfs_hotplug_event(dev); 1996 out_fini: 1997 drm_modeset_drop_locks(&ctx); 1998 drm_modeset_acquire_fini(&ctx); 1999 mutex_unlock(&dev->mode_config.mutex); 2000 2001 return 0; 2002 } 2003 2004 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 2005 u16 *r, u16 *g, u16 *b, 2006 uint32_t size, 2007 struct drm_modeset_acquire_ctx *ctx) 2008 { 2009 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 2010 int i; 2011 2012 for (i = 0; i < size; i++) { 2013 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i, 2014 r[i], g[i], b[i]); 2015 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8); 2016 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); 2017 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); 2018 } 2019 2020 return 0; 2021 } 2022 2023 int vmw_du_connector_dpms(struct drm_connector *connector, int mode) 2024 { 2025 return 0; 2026
} 2027 2028 enum drm_connector_status 2029 vmw_du_connector_detect(struct drm_connector *connector, bool force) 2030 { 2031 uint32_t num_displays; 2032 struct drm_device *dev = connector->dev; 2033 struct vmw_private *dev_priv = vmw_priv(dev); 2034 struct vmw_display_unit *du = vmw_connector_to_du(connector); 2035 2036 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); 2037 2038 return ((vmw_connector_to_du(connector)->unit < num_displays && 2039 du->pref_active) ? 2040 connector_status_connected : connector_status_disconnected); 2041 } 2042 2043 static struct drm_display_mode vmw_kms_connector_builtin[] = { 2044 /* 640x480@60Hz */ 2045 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 2046 752, 800, 0, 480, 489, 492, 525, 0, 2047 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 2048 /* 800x600@60Hz */ 2049 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 2050 968, 1056, 0, 600, 601, 605, 628, 0, 2051 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2052 /* 1024x768@60Hz */ 2053 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 2054 1184, 1344, 0, 768, 771, 777, 806, 0, 2055 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 2056 /* 1152x864@75Hz */ 2057 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 2058 1344, 1600, 0, 864, 865, 868, 900, 0, 2059 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2060 /* 1280x768@60Hz */ 2061 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 2062 1472, 1664, 0, 768, 771, 778, 798, 0, 2063 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2064 /* 1280x800@60Hz */ 2065 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 2066 1480, 1680, 0, 800, 803, 809, 831, 0, 2067 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, 2068 /* 1280x960@60Hz */ 2069 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 2070 1488, 1800, 0, 960, 961, 964, 1000, 0, 2071 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2072 /* 1280x1024@60Hz */ 2073 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, 2074 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, 2075 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2076 /* 1360x768@60Hz */ 2077 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 2078 1536, 1792, 0, 768, 771, 777, 795, 0, 2079 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2080 /* 1400x1050@60Hz */ 2081 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, 2082 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, 2083 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2084 /* 1440x900@60Hz */ 2085 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, 2086 1672, 1904, 0, 900, 903, 909, 934, 0, 2087 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2088 /* 1600x1200@60Hz */ 2089 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, 2090 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, 2091 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2092 /* 1680x1050@60Hz */ 2093 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, 2094 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, 2095 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2096 /* 1792x1344@60Hz */ 2097 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, 2098 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, 2099 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2100 /* 1856x1392@60Hz */ 2101 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2102 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, 2103 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 2104 /* 1920x1200@60Hz */ 2105 {
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
        mode->hsync_start = mode->hdisplay + 50;
        mode->hsync_end = mode->hsync_start + 50;
        mode->htotal = mode->hsync_end + 50;

        mode->vsync_start = mode->vdisplay + 50;
        mode->vsync_end = mode->vsync_start + 50;
        mode->vtotal = mode->vsync_end + 50;

        /* Pixel clock in kHz for a ~60Hz refresh: htotal * vtotal * 60 / 1000 */
        mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}
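/*
 * Worked example (illustrative only): for a 1024x768 request the code above
 * yields htotal = 1024 + 150 = 1174 and vtotal = 768 + 150 = 918, so
 *
 *      clock = 1174 * 918 / 100 * 6 = 64662 kHz
 *
 * and the resulting refresh rate is 64662000 / (1174 * 918) ~= 60 Hz.
 */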
int vmw_du_connector_fill_modes(struct drm_connector *connector,
                                uint32_t max_width, uint32_t max_height)
{
        struct vmw_display_unit *du = vmw_connector_to_du(connector);
        struct drm_device *dev = connector->dev;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_display_mode *mode = NULL;
        struct drm_display_mode *bmode;
        struct drm_display_mode prefmode = { DRM_MODE("preferred",
                DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        int i;
        u32 assumed_bpp = 4;

        if (dev_priv->assume_16bpp)
                assumed_bpp = 2;

        max_width = min(max_width, dev_priv->texture_max_width);
        max_height = min(max_height, dev_priv->texture_max_height);

        /*
         * For STDU, a mode is further limited by the
         * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
         */
        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width = min(max_width, dev_priv->stdu_max_width);
                max_height = min(max_height, dev_priv->stdu_max_height);
        }

        /* Add preferred mode */
        mode = drm_mode_duplicate(dev, &prefmode);
        if (!mode)
                return 0;
        mode->hdisplay = du->pref_width;
        mode->vdisplay = du->pref_height;
        vmw_guess_mode_timing(mode);

        if (vmw_kms_validate_mode_vram(dev_priv,
                                       mode->hdisplay * assumed_bpp,
                                       mode->vdisplay)) {
                drm_mode_probed_add(connector, mode);
        } else {
                drm_mode_destroy(dev, mode);
                mode = NULL;
        }

        if (du->pref_mode) {
                list_del_init(&du->pref_mode->head);
                drm_mode_destroy(dev, du->pref_mode);
        }

        /* mode might be null here, this is intended */
        du->pref_mode = mode;

        for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
                bmode = &vmw_kms_connector_builtin[i];
                if (bmode->hdisplay > max_width ||
                    bmode->vdisplay > max_height)
                        continue;

                if (!vmw_kms_validate_mode_vram(dev_priv,
                                                bmode->hdisplay * assumed_bpp,
                                                bmode->vdisplay))
                        continue;

                mode = drm_mode_duplicate(dev, bmode);
                if (!mode)
                        return 0;

                drm_mode_probed_add(connector, mode);
        }

        drm_connector_list_update(connector);
        /* Move the preferred mode first; this helps apps pick the right mode. */
        drm_mode_sort(&connector->modes);

        return 1;
}
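/*
 * Example outcome (illustrative only): with du->pref_width = 1280,
 * du->pref_height = 800 and sufficiently large limits, the probed list ends
 * up containing a preferred 1280x800 mode (timings from
 * vmw_guess_mode_timing()) followed by every builtin mode that fits, with
 * the preferred mode sorted to the front by drm_mode_sort().
 */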
/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limits on the topology, x + w and y + h (lower right)
 * cannot be greater than INT_MAX. A topology beyond these limits will fail
 * with an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_vmw_update_layout_arg *arg =
                (struct drm_vmw_update_layout_arg *)data;
        void __user *user_rects;
        struct drm_vmw_rect *rects;
        struct drm_rect *drm_rects;
        unsigned rects_size;
        int ret, i;

        if (!arg->num_outputs) {
                struct drm_rect def_rect = {0, 0, 800, 600};

                VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
                              def_rect.x1, def_rect.y1,
                              def_rect.x2, def_rect.y2);
                vmw_du_update_layout(dev_priv, 1, &def_rect);
                return 0;
        }

        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
        rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
                        GFP_KERNEL);
        if (unlikely(!rects))
                return -ENOMEM;

        user_rects = (void __user *)(unsigned long)arg->rects;
        ret = copy_from_user(rects, user_rects, rects_size);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to get rects.\n");
                ret = -EFAULT;
                goto out_free;
        }

        drm_rects = (struct drm_rect *)rects;

        VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
        for (i = 0; i < arg->num_outputs; i++) {
                struct drm_vmw_rect curr_rect;

                /*
                 * Verify the user-space rects against overflow, since the
                 * kernel converts them to drm_rect.
                 */
                if ((rects[i].x + rects[i].w > INT_MAX) ||
                    (rects[i].y + rects[i].h > INT_MAX)) {
                        ret = -ERANGE;
                        goto out_free;
                }

                curr_rect = rects[i];
                drm_rects[i].x1 = curr_rect.x;
                drm_rects[i].y1 = curr_rect.y;
                drm_rects[i].x2 = curr_rect.x + curr_rect.w;
                drm_rects[i].y2 = curr_rect.y + curr_rect.h;

                VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
                              drm_rects[i].x1, drm_rects[i].y1,
                              drm_rects[i].x2, drm_rects[i].y2);

                /*
                 * Currently this check limits the topology to
                 * mode_config->max (which actually is the max texture size
                 * supported by the virtual device). The limit is here to
                 * address window managers that create one big framebuffer
                 * for the whole topology.
                 */
                if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
                    drm_rects[i].x2 > mode_config->max_width ||
                    drm_rects[i].y2 > mode_config->max_height) {
                        VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
                                      drm_rects[i].x1, drm_rects[i].y1,
                                      drm_rects[i].x2, drm_rects[i].y2);
                        ret = -EINVAL;
                        goto out_free;
                }
        }

        ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

        if (ret == 0)
                vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
        kfree(rects);
        return ret;
}
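/*
 * Conversion example (illustrative only): with the ioctl above, a
 * drm_vmw_rect of { .x = 640, .y = 0, .w = 800, .h = 600 } becomes the
 * drm_rect (x1 = 640, y1 = 0, x2 = 1440, y2 = 600) before the layout is
 * validated and applied.
 */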
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         struct vmw_framebuffer *framebuffer,
                         const struct drm_clip_rect *clips,
                         const struct drm_vmw_rect *vclips,
                         s32 dest_x, s32 dest_y,
                         int num_clips,
                         int increment,
                         struct vmw_kms_dirty *dirty)
{
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
        struct drm_crtc *crtc;
        u32 num_units = 0;
        u32 i, k;

        dirty->dev_priv = dev_priv;

        /* If crtc is passed, no need to iterate over other display units */
        if (dirty->crtc) {
                units[num_units++] = vmw_crtc_to_du(dirty->crtc);
        } else {
                list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
                                    head) {
                        struct drm_plane *plane = crtc->primary;

                        if (plane->state->fb == &framebuffer->base)
                                units[num_units++] = vmw_crtc_to_du(crtc);
                }
        }

        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
                s32 crtc_x = unit->crtc.x;
                s32 crtc_y = unit->crtc.y;
                s32 crtc_width = unit->crtc.mode.hdisplay;
                s32 crtc_height = unit->crtc.mode.vdisplay;
                const struct drm_clip_rect *clips_ptr = clips;
                const struct drm_vmw_rect *vclips_ptr = vclips;

                dirty->unit = unit;
                if (dirty->fifo_reserve_size > 0) {
                        dirty->cmd = VMW_CMD_RESERVE(dev_priv,
                                                     dirty->fifo_reserve_size);
                        if (!dirty->cmd)
                                return -ENOMEM;

                        memset(dirty->cmd, 0, dirty->fifo_reserve_size);
                }
                dirty->num_hits = 0;
                for (i = 0; i < num_clips; i++, clips_ptr += increment,
                     vclips_ptr += increment) {
                        s32 clip_left;
                        s32 clip_top;

                        /*
                         * Select clip array type. Note that integer type
                         * in @clips is unsigned short, whereas in @vclips
                         * it's 32-bit.
                         */
                        if (clips) {
                                dirty->fb_x = (s32) clips_ptr->x1;
                                dirty->fb_y = (s32) clips_ptr->y1;
                                dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
                                        crtc_x;
                                dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
                                        crtc_y;
                        } else {
                                dirty->fb_x = vclips_ptr->x;
                                dirty->fb_y = vclips_ptr->y;
                                dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
                                        dest_x - crtc_x;
                                dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
                                        dest_y - crtc_y;
                        }

                        dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
                        dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

                        /* Skip this clip if it's outside the crtc region */
                        if (dirty->unit_x1 >= crtc_width ||
                            dirty->unit_y1 >= crtc_height ||
                            dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
                                continue;

                        /* Clip right and bottom to crtc limits */
                        dirty->unit_x2 = min_t(s32, dirty->unit_x2,
                                               crtc_width);
                        dirty->unit_y2 = min_t(s32, dirty->unit_y2,
                                               crtc_height);

                        /* Clip left and top to crtc limits */
                        clip_left = min_t(s32, dirty->unit_x1, 0);
                        clip_top = min_t(s32, dirty->unit_y1, 0);
                        dirty->unit_x1 -= clip_left;
                        dirty->unit_y1 -= clip_top;
                        dirty->fb_x -= clip_left;
                        dirty->fb_y -= clip_top;

                        dirty->clip(dirty);
                }

                dirty->fifo_commit(dirty);
        }

        return 0;
}
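/*
 * Minimal usage sketch for vmw_kms_helper_dirty() (illustrative only; the
 * callback names are hypothetical). A display unit implementation fills in
 * the closure with a per-clip command builder and a commit callback, then
 * hands it off:
 *
 *      struct vmw_kms_dirty dirty = { };
 *
 *      dirty.crtc = NULL;                  (act on every unit scanning out fb)
 *      dirty.fifo_reserve_size = cmd_size; (space reserved before clipping)
 *      dirty.clip = my_build_clip_cmd;
 *      dirty.fifo_commit = my_commit_cmds;
 *      ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
 *                                 0, 0, num_clips, 1, &dirty);
 *
 * Before each dirty.clip() call the helper has clipped the rect to the crtc
 * and filled in dirty.unit_x1/y1/x2/y2 (unit coordinates) and
 * dirty.fb_x/fb_y (framebuffer coordinates).
 */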
/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
                                      struct drm_file *file_priv,
                                      struct vmw_validation_context *ctx,
                                      struct vmw_fence_obj **out_fence,
                                      struct drm_vmw_fence_rep __user *
                                      user_fence_rep)
{
        struct vmw_fence_obj *fence = NULL;
        uint32_t handle = 0;
        int ret = 0;

        if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
            out_fence)
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
                                                 file_priv ? &handle : NULL);
        vmw_validation_done(ctx, fence);
        if (file_priv)
                vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
                                            ret, user_fence_rep, fence,
                                            handle, -1, NULL);
        if (out_fence)
                *out_fence = fence;
        else
                vmw_fence_obj_unreference(&fence);
}
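/*
 * Usage note (illustrative): kernel-internal callers that only need to fence
 * the submission and drop the validation context can call this with no file
 * or user fence info, e.g.
 *
 *      vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx,
 *                                       out_fence, NULL);
 *
 * as vmw_du_helper_plane_update() below does; passing a NULL @out_fence
 * simply drops the fence reference once validation is done.
 */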
/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated when this function is called.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
                         const struct drm_clip_rect *clips,
                         unsigned num_clips,
                         int increment)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBImage body;
        } *cmd;
        SVGA3dBox *box;
        size_t copy_size = 0;
        int i;

        if (!clips)
                return 0;

        cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
        if (!cmd)
                return -ENOMEM;

        for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
                box = &cmd->body.box;

                cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
                cmd->header.size = sizeof(cmd->body);
                cmd->body.image.sid = res->id;
                cmd->body.image.face = 0;
                cmd->body.image.mipmap = 0;

                if (clips->x1 > size->width || clips->x2 > size->width ||
                    clips->y1 > size->height || clips->y2 > size->height) {
                        DRM_ERROR("Invalid clips outside of framebuffer.\n");
                        return -EINVAL;
                }

                box->x = clips->x1;
                box->y = clips->y1;
                box->z = 0;
                box->w = clips->x2 - clips->x1;
                box->h = clips->y2 - clips->y1;
                box->d = 1;

                copy_size += sizeof(*cmd);
        }

        vmw_cmd_commit(dev_priv, copy_size);

        return 0;
}
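/*
 * Box-conversion example (illustrative only): a drm_clip_rect of
 * (x1 = 16, y1 = 16, x2 = 64, y2 = 48) is encoded above as the SVGA3dBox
 * (x = 16, y = 16, z = 0, w = 48, h = 32, d = 1), i.e. a width and height
 * rather than a second corner.
 */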
int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
                            unsigned unit,
                            u32 max_width,
                            u32 max_height,
                            struct drm_connector **p_con,
                            struct drm_crtc **p_crtc,
                            struct drm_display_mode **p_mode)
{
        struct drm_connector *con;
        struct vmw_display_unit *du;
        struct drm_display_mode *mode;
        int i = 0;
        int ret = 0;

        mutex_lock(&dev_priv->drm.mode_config.mutex);
        list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
                            head) {
                if (i == unit)
                        break;

                ++i;
        }

        if (&con->head == &dev_priv->drm.mode_config.connector_list) {
                DRM_ERROR("Could not find initial display unit.\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        if (list_empty(&con->modes))
                (void) vmw_du_connector_fill_modes(con, max_width, max_height);

        if (list_empty(&con->modes)) {
                DRM_ERROR("Could not find initial display mode.\n");
                ret = -EINVAL;
                goto out_unlock;
        }

        du = vmw_connector_to_du(con);
        *p_con = con;
        *p_crtc = &du->crtc;

        list_for_each_entry(mode, &con->modes, head) {
                if (mode->type & DRM_MODE_TYPE_PREFERRED)
                        break;
        }

        if (&mode->head == &con->modes) {
                WARN_ONCE(true, "Could not find initial preferred mode.\n");
                *p_mode = list_first_entry(&con->modes,
                                           struct drm_display_mode,
                                           head);
        } else {
                *p_mode = mode;
        }

out_unlock:
        mutex_unlock(&dev_priv->drm.mode_config.mutex);

        return ret;
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
        if (dev_priv->implicit_placement_property)
                return;

        dev_priv->implicit_placement_property =
                drm_property_create_range(&dev_priv->drm,
                                          DRM_MODE_PROP_IMMUTABLE,
                                          "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
        if (IS_ERR(dev_priv->suspend_state)) {
                int ret = PTR_ERR(dev_priv->suspend_state);

                DRM_ERROR("Failed kms suspend: %d\n", ret);
                dev_priv->suspend_state = NULL;

                return ret;
        }

        return 0;
}

/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        if (WARN_ON(!dev_priv->suspend_state))
                return 0;

        ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
        dev_priv->suspend_state = NULL;

        return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
        drm_atomic_helper_shutdown(dev);
}
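/*
 * Usage note (illustrative): vmw_kms_suspend() and vmw_kms_resume() are
 * intended to be called in pairs from the driver's PM paths, e.g.
 *
 *      ret = vmw_kms_suspend(&dev_priv->drm);
 *      ... power down, then power up the device ...
 *      ret = vmw_kms_resume(&dev_priv->drm);
 *
 * Calling vmw_kms_resume() without a preceding successful vmw_kms_suspend()
 * trips the WARN_ON above and is otherwise a no-op.
 */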
/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting the callbacks in &vmw_du_update_plane to
 * perform a plane update on the display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
        struct drm_plane_state *state = update->plane->state;
        struct drm_plane_state *old_state = update->old_state;
        struct drm_atomic_helper_damage_iter iter;
        struct drm_rect clip;
        struct drm_rect bb;
        DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
        uint32_t reserved_size = 0;
        uint32_t submit_size = 0;
        uint32_t curr_size = 0;
        uint32_t num_hits = 0;
        void *cmd_start;
        char *cmd_next;
        int ret;

        /*
         * Iterate in advance to check whether a plane update is really needed
         * and to find the number of clips that actually fall within the plane
         * src, for fifo allocation.
         */
        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip)
                num_hits++;

        if (num_hits == 0)
                return 0;

        if (update->vfb->bo) {
                struct vmw_framebuffer_bo *vfbbo =
                        container_of(update->vfb, typeof(*vfbbo), base);

                ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
                                            update->cpu_blit);
        } else {
                struct vmw_framebuffer_surface *vfbs =
                        container_of(update->vfb, typeof(*vfbs), base);

                ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
                                                  0, VMW_RES_DIRTY_NONE, NULL,
                                                  NULL);
        }

        if (ret)
                return ret;

        ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
        if (ret)
                goto out_unref;

        reserved_size = update->calc_fifo_size(update, num_hits);
        cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
        if (!cmd_start) {
                ret = -ENOMEM;
                goto out_revert;
        }

        cmd_next = cmd_start;

        if (update->post_prepare) {
                curr_size = update->post_prepare(update, cmd_next);
                cmd_next += curr_size;
                submit_size += curr_size;
        }

        if (update->pre_clip) {
                curr_size = update->pre_clip(update, cmd_next, num_hits);
                cmd_next += curr_size;
                submit_size += curr_size;
        }

        bb.x1 = INT_MAX;
        bb.y1 = INT_MAX;
        bb.x2 = INT_MIN;
        bb.y2 = INT_MIN;

        drm_atomic_helper_damage_iter_init(&iter, old_state, state);
        drm_atomic_for_each_plane_damage(&iter, &clip) {
                uint32_t fb_x = clip.x1;
                uint32_t fb_y = clip.y1;

                vmw_du_translate_to_crtc(state, &clip);
                if (update->clip) {
                        curr_size = update->clip(update, cmd_next, &clip, fb_x,
                                                 fb_y);
                        cmd_next += curr_size;
                        submit_size += curr_size;
                }
                bb.x1 = min_t(int, bb.x1, clip.x1);
                bb.y1 = min_t(int, bb.y1, clip.y1);
                bb.x2 = max_t(int, bb.x2, clip.x2);
                bb.y2 = max_t(int, bb.y2, clip.y2);
        }

        curr_size = update->post_clip(update, cmd_next, &bb);
        submit_size += curr_size;

        /* Never commit more than was reserved; drop the batch instead. */
        if (reserved_size < submit_size)
                submit_size = 0;

        vmw_cmd_commit(update->dev_priv, submit_size);

        vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
                                         update->out_fence, NULL);
        return ret;

out_revert:
        vmw_validation_revert(&val_ctx);

out_unref:
        vmw_validation_unref_lists(&val_ctx);
        return ret;
}
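/*
 * Minimal usage sketch for vmw_du_helper_plane_update() (illustrative only;
 * the callback names are hypothetical). A display unit backend fills in
 * struct vmw_du_update_plane with its command builders and calls the helper
 * from its atomic update hook:
 *
 *      struct vmw_du_update_plane update = {
 *              .plane = plane,
 *              .old_state = old_state,
 *              .dev_priv = dev_priv,
 *              .vfb = vfb,             (the vmw_framebuffer being scanned out)
 *              .mutex = NULL,
 *              .intr = true,
 *              .calc_fifo_size = my_calc_fifo_size,
 *              .pre_clip = my_pre_clip,
 *              .clip = my_clip,
 *              .post_clip = my_post_clip,
 *      };
 *
 *      ret = vmw_du_helper_plane_update(&update);
 *
 * post_clip is mandatory in the sketch above since the helper calls it
 * unconditionally, while post_prepare, pre_clip and clip are optional.
 */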