// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_damage_helper.h>

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}
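/*
 * Layout note for vmw_cursor_update_image() above: the reserved FIFO
 * allocation is one contiguous block. The command word and the
 * SVGAFifoCmdDefineAlphaCursor struct come first, immediately followed by
 * width * height 32-bit ARGB pixels. &cmd[1] points one whole struct past
 * cmd, i.e. at the first byte of the pixel payload, which is why the
 * memcpy() of the image lands directly behind the command words.
 */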
static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
				struct vmw_buffer_object *bo,
				u32 width, u32 height,
				u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&bo->base);

	return ret;
}


static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}


void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/*
		 * snooper.image is a u32 pointer, so i * 64 advances it by
		 * one 64-pixel (256-byte) row per iteration.
		 */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
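/*
 * A note on the mapping size used by vmw_kms_cursor_snoop() above: the
 * snooped cursor image is always 64x64 ARGB, i.e. 64 * 64 * 4 = 16 KiB.
 * Assuming the common 4 KiB page size, (64*64*4) >> PAGE_SHIFT hands
 * exactly four pages to ttm_bo_kmap().
 */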
/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer
 * surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);


	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;


	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->state->fb) {
		hotspot_x += plane->state->fb->hot_x;
		hotspot_y += plane->state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x,
					      hotspot_y);
	} else if (vps->bo) {
		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
					   plane->state->crtc_w,
					   plane->state->crtc_h,
					   hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}
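/*
 * Hotspot bookkeeping in vmw_du_cursor_plane_atomic_update() above: the
 * final hotspot is the sum of the legacy per-unit hotspot (set through the
 * cursor-bypass ioctl, kept in du->hotspot_x/y) and the hotspot that came
 * in with the framebuffer (fb->hot_x/y from userspace). The difference is
 * stored back as du->core_hotspot_x/y so that later snooper-driven updates
 * in vmw_kms_cursor_post_execbuf() can re-apply both contributions.
 */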
/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = state->fb;
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state,
							   state->crtc);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vcs = vmw_connector_state_to_vcs(du->connector.state);
	}


	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}


int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;


	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}
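/*
 * The virtual device has no vblank interrupt, so there is nothing to wait
 * for in vmw_du_crtc_atomic_flush() above: any pending event is completed
 * immediately under event_lock. This pairs with the vblank stubs further
 * down (vmw_get_vblank_counter() and friends), which report no counter and
 * refuse to enable vblank.
 */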
/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;


	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;


	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);


	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}
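/*
 * All of the duplicate/reset/destroy hooks above follow the same pattern:
 * the vmw state structs (vmw_crtc_state, vmw_plane_state,
 * vmw_connector_state) embed the generic DRM state as their first member,
 * so kmemdup() of the full subclass followed by
 * __drm_atomic_helper_*_duplicate_state() on the embedded base keeps both
 * the driver-private fields and the core bookkeeping consistent, and
 * upcasting via &vcs->base / &vps->base is always valid.
 */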
/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;


	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}
/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format,
					      &format_name));
		return -EINVAL;
	}
	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};
/*
 * Pin the buffer in a location suitable for access by the
 * display system.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}
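/*
 * Placement summary for vmw_framebuffer_pin() above:
 *
 *   legacy display unit          - start of VRAM, overlays paused around it
 *   screen object / target, bo   - VRAM or GMR when the host has 3D
 *                                  (surface DMA), system memory otherwise
 *                                  (CPU blit)
 *   screen object / target, surf - MOB memory for surface / image updates
 */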
/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format,
					      &format_name));
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 SVGA3D_MS_PATTERN_NONE,
					 SVGA3D_MS_QUALITY_NONE,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
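/*
 * Sizing example for vmw_create_bo_proxy() above (illustrative numbers):
 * for a hypothetical 1920x1080 DRM_FORMAT_XRGB8888 framebuffer with
 * pitches[0] == 7680, bytes_pp is 4, so the proxy surface is defined as
 * 7680 / 4 = 1920 pixels wide by 1080 high by 1 deep. Deriving the width
 * from the pitch rather than mode_cmd->width keeps any padding pixels at
 * the end of each scanline addressable by the surface copy.
 */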
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format,
						      &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);

		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}
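/*
 * Typical use of vmw_kms_new_framebuffer() (a sketch; see the real caller
 * in vmw_kms_fb_create() below): the user handle is looked up, yielding
 * either a buffer object or a surface, and then
 *
 *	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
 *				      !(dev_priv->capabilities & SVGA_CAP_3D),
 *				      mode_cmd);
 *
 * is called with exactly one of bo and surface non-NULL. Passing only_2d
 * this way makes 3D-incapable hosts take the 2D-only path, which is what
 * allows a proxy surface to be substituted on screen targets.
 */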
/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;


	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}


	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU only individual screen (screen target) is limited
		 * by SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			DRM_ERROR("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below, prim_bb_mem is the VRAM size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM size
	 * is the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->prim_bb_mem) {
		DRM_ERROR("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
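/*
 * Example of the accounting in vmw_kms_check_display_memory() above,
 * assuming a hypothetical side-by-side pair of 1920x1080 screens: the
 * bounding box is 3840x1080 with its upper left at (0,0), total_pixels is
 * 2 * 1920 * 1080, and both pixel_mem and bb_mem come to
 * 3840 * 1080 * 4 = 16588800 bytes (~15.8 MiB), each of which must fit in
 * prim_bb_mem (subject to the SVGA_CAP_NO_BB_RESTRICTION exception).
 */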
/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}
/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx, each crtc has only one connector attached and
		 * it is not changed, so we don't really need to check
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}
/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that allows us
 * to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret)
		return ret;

	if (!state->allow_modeset)
		return ret;

	/*
	 * Legacy paths such as @drm_atomic_helper_update_plane do not set
	 * allow_modeset properly, which would result in unnecessary calls to
	 * vmw_kms_check_topology. Hence the extra check below.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}
static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;

}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * Docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}
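/*
 * A worked example for vmw_kms_validate_mode_vram() above, assuming a
 * hypothetical 1920x1200 mode at 32 bpp: pitch = 1920 * 4 = 7680 bytes,
 * so the mode needs 7680 * 1200 = 9216000 bytes (~8.8 MiB), which must
 * stay below prim_bb_mem for screen targets or below vram_size otherwise.
 */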
/*
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/*
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -EINVAL;
}

/*
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	drm_sysfs_hotplug_event(dev);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
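
/*
 * Sanity check (illustrative, not driver code): for the entries above,
 * drm_mode_vrefresh() works out to roughly clock * 1000 / (htotal * vtotal).
 * For the 640x480 entry that is 25175 * 1000 / (800 * 525) ~= 59.9 Hz,
 * i.e. the advertised 60Hz.
 */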

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width = min(max_width, dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, the mode size is additionally limited by the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first; this helps apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}
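
/*
 * Worked example (illustrative): for an 800x600 preferred mode,
 * vmw_guess_mode_timing() above yields htotal = 800 + 150 = 950 and
 * vtotal = 600 + 150 = 750, so clock = 950 * 750 / 100 * 6 = 42750 kHz,
 * which drm_mode_vrefresh() turns back into 42750000 / (950 * 750) = 60 Hz.
 */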

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.:
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. Topologies beyond these limits are rejected with
 * an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0, 800, 600};

		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space for overflow as the kernel uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		/*
		 * Currently this check limits the topology to within
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}
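
/*
 * Example (illustrative sketch, not driver code): from user-space the ioctl
 * above could be driven through libdrm roughly as follows; error handling is
 * elided and the two-output topology is hypothetical:
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,   .y = 0, .w = 640, .h = 480 },
 *		{ .x = 640, .y = 0, .w = 800, .h = 600 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (unsigned long)rects,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */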

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
2486 */ 2487 if (clips) { 2488 dirty->fb_x = (s32) clips_ptr->x1; 2489 dirty->fb_y = (s32) clips_ptr->y1; 2490 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x - 2491 crtc_x; 2492 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y - 2493 crtc_y; 2494 } else { 2495 dirty->fb_x = vclips_ptr->x; 2496 dirty->fb_y = vclips_ptr->y; 2497 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w + 2498 dest_x - crtc_x; 2499 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h + 2500 dest_y - crtc_y; 2501 } 2502 2503 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x; 2504 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y; 2505 2506 /* Skip this clip if it's outside the crtc region */ 2507 if (dirty->unit_x1 >= crtc_width || 2508 dirty->unit_y1 >= crtc_height || 2509 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0) 2510 continue; 2511 2512 /* Clip right and bottom to crtc limits */ 2513 dirty->unit_x2 = min_t(s32, dirty->unit_x2, 2514 crtc_width); 2515 dirty->unit_y2 = min_t(s32, dirty->unit_y2, 2516 crtc_height); 2517 2518 /* Clip left and top to crtc limits */ 2519 clip_left = min_t(s32, dirty->unit_x1, 0); 2520 clip_top = min_t(s32, dirty->unit_y1, 0); 2521 dirty->unit_x1 -= clip_left; 2522 dirty->unit_y1 -= clip_top; 2523 dirty->fb_x -= clip_left; 2524 dirty->fb_y -= clip_top; 2525 2526 dirty->clip(dirty); 2527 } 2528 2529 dirty->fifo_commit(dirty); 2530 } 2531 2532 return 0; 2533 } 2534 2535 /** 2536 * vmw_kms_helper_validation_finish - Helper for post KMS command submission 2537 * cleanup and fencing 2538 * @dev_priv: Pointer to the device-private struct 2539 * @file_priv: Pointer identifying the client when user-space fencing is used 2540 * @ctx: Pointer to the validation context 2541 * @out_fence: If non-NULL, returned refcounted fence-pointer 2542 * @user_fence_rep: If non-NULL, pointer to user-space address area 2543 * in which to copy user-space fence info 2544 */ 2545 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, 2546 struct drm_file *file_priv, 2547 struct vmw_validation_context *ctx, 2548 struct vmw_fence_obj **out_fence, 2549 struct drm_vmw_fence_rep __user * 2550 user_fence_rep) 2551 { 2552 struct vmw_fence_obj *fence = NULL; 2553 uint32_t handle = 0; 2554 int ret = 0; 2555 2556 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2557 out_fence) 2558 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, 2559 file_priv ? &handle : NULL); 2560 vmw_validation_done(ctx, fence); 2561 if (file_priv) 2562 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), 2563 ret, user_fence_rep, fence, 2564 handle, -1, NULL); 2565 if (out_fence) 2566 *out_fence = fence; 2567 else 2568 vmw_fence_obj_unreference(&fence); 2569 } 2570 2571 /** 2572 * vmw_kms_update_proxy - Helper function to update a proxy surface from 2573 * its backing MOB. 2574 * 2575 * @res: Pointer to the surface resource 2576 * @clips: Clip rects in framebuffer (surface) space. 2577 * @num_clips: Number of clips in @clips. 2578 * @increment: Integer with which to increment the clip counter when looping. 2579 * Used to skip a predetermined number of clip rects. 2580 * 2581 * This function makes sure the proxy surface is updated from its backing MOB 2582 * using the region given by @clips. The surface resource @res and its backing 2583 * MOB needs to be reserved and validated on call. 
2584 */ 2585 int vmw_kms_update_proxy(struct vmw_resource *res, 2586 const struct drm_clip_rect *clips, 2587 unsigned num_clips, 2588 int increment) 2589 { 2590 struct vmw_private *dev_priv = res->dev_priv; 2591 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size; 2592 struct { 2593 SVGA3dCmdHeader header; 2594 SVGA3dCmdUpdateGBImage body; 2595 } *cmd; 2596 SVGA3dBox *box; 2597 size_t copy_size = 0; 2598 int i; 2599 2600 if (!clips) 2601 return 0; 2602 2603 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips); 2604 if (!cmd) 2605 return -ENOMEM; 2606 2607 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) { 2608 box = &cmd->body.box; 2609 2610 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; 2611 cmd->header.size = sizeof(cmd->body); 2612 cmd->body.image.sid = res->id; 2613 cmd->body.image.face = 0; 2614 cmd->body.image.mipmap = 0; 2615 2616 if (clips->x1 > size->width || clips->x2 > size->width || 2617 clips->y1 > size->height || clips->y2 > size->height) { 2618 DRM_ERROR("Invalid clips outsize of framebuffer.\n"); 2619 return -EINVAL; 2620 } 2621 2622 box->x = clips->x1; 2623 box->y = clips->y1; 2624 box->z = 0; 2625 box->w = clips->x2 - clips->x1; 2626 box->h = clips->y2 - clips->y1; 2627 box->d = 1; 2628 2629 copy_size += sizeof(*cmd); 2630 } 2631 2632 vmw_fifo_commit(dev_priv, copy_size); 2633 2634 return 0; 2635 } 2636 2637 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, 2638 unsigned unit, 2639 u32 max_width, 2640 u32 max_height, 2641 struct drm_connector **p_con, 2642 struct drm_crtc **p_crtc, 2643 struct drm_display_mode **p_mode) 2644 { 2645 struct drm_connector *con; 2646 struct vmw_display_unit *du; 2647 struct drm_display_mode *mode; 2648 int i = 0; 2649 int ret = 0; 2650 2651 mutex_lock(&dev_priv->dev->mode_config.mutex); 2652 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, 2653 head) { 2654 if (i == unit) 2655 break; 2656 2657 ++i; 2658 } 2659 2660 if (i != unit) { 2661 DRM_ERROR("Could not find initial display unit.\n"); 2662 ret = -EINVAL; 2663 goto out_unlock; 2664 } 2665 2666 if (list_empty(&con->modes)) 2667 (void) vmw_du_connector_fill_modes(con, max_width, max_height); 2668 2669 if (list_empty(&con->modes)) { 2670 DRM_ERROR("Could not find initial display mode.\n"); 2671 ret = -EINVAL; 2672 goto out_unlock; 2673 } 2674 2675 du = vmw_connector_to_du(con); 2676 *p_con = con; 2677 *p_crtc = &du->crtc; 2678 2679 list_for_each_entry(mode, &con->modes, head) { 2680 if (mode->type & DRM_MODE_TYPE_PREFERRED) 2681 break; 2682 } 2683 2684 if (mode->type & DRM_MODE_TYPE_PREFERRED) 2685 *p_mode = mode; 2686 else { 2687 WARN_ONCE(true, "Could not find initial preferred mode.\n"); 2688 *p_mode = list_first_entry(&con->modes, 2689 struct drm_display_mode, 2690 head); 2691 } 2692 2693 out_unlock: 2694 mutex_unlock(&dev_priv->dev->mode_config.mutex); 2695 2696 return ret; 2697 } 2698 2699 /** 2700 * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement 2701 * property. 2702 * 2703 * @dev_priv: Pointer to a device private struct. 2704 * 2705 * Sets up the implicit placement property unless it's already set up. 
2706 */ 2707 void 2708 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv) 2709 { 2710 if (dev_priv->implicit_placement_property) 2711 return; 2712 2713 dev_priv->implicit_placement_property = 2714 drm_property_create_range(dev_priv->dev, 2715 DRM_MODE_PROP_IMMUTABLE, 2716 "implicit_placement", 0, 1); 2717 } 2718 2719 /** 2720 * vmw_kms_suspend - Save modesetting state and turn modesetting off. 2721 * 2722 * @dev: Pointer to the drm device 2723 * Return: 0 on success. Negative error code on failure. 2724 */ 2725 int vmw_kms_suspend(struct drm_device *dev) 2726 { 2727 struct vmw_private *dev_priv = vmw_priv(dev); 2728 2729 dev_priv->suspend_state = drm_atomic_helper_suspend(dev); 2730 if (IS_ERR(dev_priv->suspend_state)) { 2731 int ret = PTR_ERR(dev_priv->suspend_state); 2732 2733 DRM_ERROR("Failed kms suspend: %d\n", ret); 2734 dev_priv->suspend_state = NULL; 2735 2736 return ret; 2737 } 2738 2739 return 0; 2740 } 2741 2742 2743 /** 2744 * vmw_kms_resume - Re-enable modesetting and restore state 2745 * 2746 * @dev: Pointer to the drm device 2747 * Return: 0 on success. Negative error code on failure. 2748 * 2749 * State is resumed from a previous vmw_kms_suspend(). It's illegal 2750 * to call this function without a previous vmw_kms_suspend(). 2751 */ 2752 int vmw_kms_resume(struct drm_device *dev) 2753 { 2754 struct vmw_private *dev_priv = vmw_priv(dev); 2755 int ret; 2756 2757 if (WARN_ON(!dev_priv->suspend_state)) 2758 return 0; 2759 2760 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state); 2761 dev_priv->suspend_state = NULL; 2762 2763 return ret; 2764 } 2765 2766 /** 2767 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost 2768 * 2769 * @dev: Pointer to the drm device 2770 */ 2771 void vmw_kms_lost_device(struct drm_device *dev) 2772 { 2773 drm_atomic_helper_shutdown(dev); 2774 } 2775 2776 /** 2777 * vmw_du_helper_plane_update - Helper to do plane update on a display unit. 2778 * @update: The closure structure. 2779 * 2780 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane 2781 * update on display unit. 2782 * 2783 * Return: 0 on success or a negative error code on failure. 2784 */ 2785 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) 2786 { 2787 struct drm_plane_state *state = update->plane->state; 2788 struct drm_plane_state *old_state = update->old_state; 2789 struct drm_atomic_helper_damage_iter iter; 2790 struct drm_rect clip; 2791 struct drm_rect bb; 2792 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 2793 uint32_t reserved_size = 0; 2794 uint32_t submit_size = 0; 2795 uint32_t curr_size = 0; 2796 uint32_t num_hits = 0; 2797 void *cmd_start; 2798 char *cmd_next; 2799 int ret; 2800 2801 /* 2802 * Iterate in advance to check if really need plane update and find the 2803 * number of clips that actually are in plane src for fifo allocation. 
2804 */ 2805 drm_atomic_helper_damage_iter_init(&iter, old_state, state); 2806 drm_atomic_for_each_plane_damage(&iter, &clip) 2807 num_hits++; 2808 2809 if (num_hits == 0) 2810 return 0; 2811 2812 if (update->vfb->bo) { 2813 struct vmw_framebuffer_bo *vfbbo = 2814 container_of(update->vfb, typeof(*vfbbo), base); 2815 2816 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false, 2817 update->cpu_blit); 2818 } else { 2819 struct vmw_framebuffer_surface *vfbs = 2820 container_of(update->vfb, typeof(*vfbs), base); 2821 2822 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res, 2823 0, VMW_RES_DIRTY_NONE, NULL, 2824 NULL); 2825 } 2826 2827 if (ret) 2828 return ret; 2829 2830 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr); 2831 if (ret) 2832 goto out_unref; 2833 2834 reserved_size = update->calc_fifo_size(update, num_hits); 2835 cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size); 2836 if (!cmd_start) { 2837 ret = -ENOMEM; 2838 goto out_revert; 2839 } 2840 2841 cmd_next = cmd_start; 2842 2843 if (update->post_prepare) { 2844 curr_size = update->post_prepare(update, cmd_next); 2845 cmd_next += curr_size; 2846 submit_size += curr_size; 2847 } 2848 2849 if (update->pre_clip) { 2850 curr_size = update->pre_clip(update, cmd_next, num_hits); 2851 cmd_next += curr_size; 2852 submit_size += curr_size; 2853 } 2854 2855 bb.x1 = INT_MAX; 2856 bb.y1 = INT_MAX; 2857 bb.x2 = INT_MIN; 2858 bb.y2 = INT_MIN; 2859 2860 drm_atomic_helper_damage_iter_init(&iter, old_state, state); 2861 drm_atomic_for_each_plane_damage(&iter, &clip) { 2862 uint32_t fb_x = clip.x1; 2863 uint32_t fb_y = clip.y1; 2864 2865 vmw_du_translate_to_crtc(state, &clip); 2866 if (update->clip) { 2867 curr_size = update->clip(update, cmd_next, &clip, fb_x, 2868 fb_y); 2869 cmd_next += curr_size; 2870 submit_size += curr_size; 2871 } 2872 bb.x1 = min_t(int, bb.x1, clip.x1); 2873 bb.y1 = min_t(int, bb.y1, clip.y1); 2874 bb.x2 = max_t(int, bb.x2, clip.x2); 2875 bb.y2 = max_t(int, bb.y2, clip.y2); 2876 } 2877 2878 curr_size = update->post_clip(update, cmd_next, &bb); 2879 submit_size += curr_size; 2880 2881 if (reserved_size < submit_size) 2882 submit_size = 0; 2883 2884 vmw_fifo_commit(update->dev_priv, submit_size); 2885 2886 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx, 2887 update->out_fence, NULL); 2888 return ret; 2889 2890 out_revert: 2891 vmw_validation_revert(&val_ctx); 2892 2893 out_unref: 2894 vmw_validation_unref_lists(&val_ctx); 2895 return ret; 2896 } 2897