// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_damage_helper.h>

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}
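/*
 * vmw_cursor_update_bo - Update the device cursor image from a buffer object
 *
 * Maps the buffer object, hands the mapped image over to
 * vmw_cursor_update_image() and unmaps it again. The buffer object is
 * expected to hold a width x height 32-bit ARGB cursor image.
 */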
static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
				struct vmw_buffer_object *bo,
				u32 width, u32 height,
				u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&bo->base);

	return ret;
}


static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}


void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is a u32 pointer, so i * 64 steps one 64-pixel row. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}
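/*
 * vmw_kms_cursor_post_execbuf - Re-emit snooped cursor images
 *
 * Called after command submission. If the snooper age of a display
 * unit's cursor surface shows that user-space has updated the cursor,
 * the snooped image is pushed to the device again.
 */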
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer
 * surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);


	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}
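/*
 * vmw_du_cursor_plane_atomic_update - Commit cursor image and position
 *
 * Pushes the cursor image from the new plane state (surface snooper image
 * or buffer object) to the device and updates the cursor position,
 * accounting for the hotspot offsets.
 */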
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;


	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (plane->state->fb) {
		hotspot_x += plane->state->fb->hot_x;
		hotspot_y += plane->state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x,
					      hotspot_y);
	} else if (vps->bo) {
		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
					   plane->state->crtc_w,
					   plane->state->crtc_h,
					   hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);

		du->core_hotspot_x = hotspot_x - du->hotspot_x;
		du->core_hotspot_y = hotspot_y - du->hotspot_y;
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about the crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = state->fb;
	int ret;

	if (state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state->state,
							   state->crtc);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vcs = vmw_connector_state_to_vcs(du->connector.state);
	}


	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}
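/*
 * vmw_du_crtc_atomic_check - Sanity-check a new crtc state
 *
 * Requires that an enabled crtc always has an active primary plane, that
 * only this display unit's connector is part of the state, and fakes a
 * dot clock since the virtual device has none.
 */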
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;


	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;


	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;


	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);


	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;


	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
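/*
 * vmw_kms_new_framebuffer_surface - Create a framebuffer wrapping a surface
 *
 * Checks that the surface is usable as a scanout buffer for the requested
 * mode and pixel format, then allocates and initializes the
 * vmw_framebuffer_surface wrapper.
 */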
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format,
					      &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}
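/*
 * vmw_framebuffer_bo_dirty - Legacy dirty callback for buffer-object backed
 * framebuffers; flushes the dirtied clip rects to the device.
 */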
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy)
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};

/*
 * vmw_framebuffer_pin - Pin the buffer in a location suitable for access
 * by the display system.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;
	struct ttm_placement *placement;
	int ret;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->bo) {
			if (dev_priv->capabilities & SVGA_CAP_3D) {
				/*
				 * Use surface DMA to get content to
				 * screen target surface.
				 */
				placement = &vmw_vram_gmr_placement;
			} else {
				/* Use CPU blit. */
				placement = &vmw_sys_placement;
			}
		} else {
			/* Use surface / image update */
			placement = &vmw_mob_placement;
		}

		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}
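/*
 * vmw_framebuffer_unpin - Unpin the backing buffer of a framebuffer
 * previously pinned with vmw_framebuffer_pin().
 */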
static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_buffer_object *buf;

	buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_bo_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_buffer_object *bo_mob,
			       struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case 8:
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format,
					      &format_name));
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 SVGA3D_MS_PATTERN_NONE,
					 SVGA3D_MS_QUALITY_NONE,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->backup);
	res->backup = vmw_bo_reference(bo_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
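/*
 * vmw_kms_new_framebuffer_bo - Create a framebuffer wrapping a buffer object
 *
 * Checks that the buffer object is large enough for the requested mode and
 * that the pixel format is supported, then allocates and initializes the
 * vmw_framebuffer_bo wrapper.
 */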
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_buffer_object *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format,
						      &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_buffer_object *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);

		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */
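/*
 * vmw_kms_fb_create - Entry point for the drm fb_create callback
 *
 * Looks up the user-space handle and wraps the resulting buffer object or
 * surface in a new vmw framebuffer.
 */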
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_buffer_object *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;


	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}


	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_bo_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU, only the individual screen (screen target) size
		 * is limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			DRM_ERROR("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
			(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below prim_bb_mem is vram size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
	 * the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->prim_bb_mem) {
		DRM_ERROR("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}
/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed, so we don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}
/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret)
		return ret;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}


int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb,
					      surface, sid, destX, destY,
					      clips, num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;

}
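/*
 * vmw_kms_init - Set up kms for the device
 *
 * Initializes the mode config limits and properties and tries the display
 * unit backends in order of preference: screen target, screen object and
 * finally legacy.
 */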
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
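/*
 * vmw_kms_save_vga - Save the current VGA register and display topology
 * state so that it can later be restored by vmw_kms_restore_vga().
 */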
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}


/*
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/*
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -EINVAL;
}
/*
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently gui_x/y is protected with the crtc mutex */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	drm_sysfs_hotplug_event(dev);
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
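/*
 * vmw_du_connector_detect - Report connector status
 *
 * A connector is reported connected when its unit is within the number of
 * displays exposed by the device and the display unit is preferred-active.
 */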

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((du->unit < num_displays && du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
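
/*
 * Worked example (illustrative numbers only): for a 1024x768 preferred
 * mode, vmw_guess_mode_timing() yields htotal = 1024 + 150 = 1174 and
 * vtotal = 768 + 150 = 918, so clock = 1174 * 918 / 100 * 6 = 64662 kHz.
 * drm_mode_vrefresh() then computes 64662000 / (1174 * 918) ~= 60 Hz,
 * which is the refresh rate the formula is constructed to approximate.
 */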

int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width = min(max_width, dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, a mode is additionally limited by the
	 * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be NULL here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first; it helps apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update the preferred topology of the display units as per the ioctl
 * request. The topology is expressed as an array of drm_vmw_rect, e.g.
 *
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offsets (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right)
 * cannot be greater than INT_MAX. A topology beyond these limits will
 * return an error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0, 800, 600};

		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/* Verify user-space input won't overflow, since the kernel
		 * uses drm_rect */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		/*
		 * Currently this check limits the topology to
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}
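
/*
 * Example (hypothetical user-space caller, for illustration only): two
 * side-by-side 800x600 outputs, expressed in the drm_vmw_rect format the
 * ioctl expects.  This assumes the libdrm drmCommandWrite() wrapper and
 * the DRM_VMW_UPDATE_LAYOUT command index from vmwgfx_drm.h:
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,   .y = 0, .w = 800, .h = 600 },
 *		{ .x = 800, .y = 0, .w = 800, .h = 600 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (unsigned long)rects,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */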
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If a crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

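			/*
			 * Worked example (illustrative numbers only): for a
			 * unit whose crtc sits at crtc_x = 640 in the
			 * topology, a clip starting at fb_x = 700 with
			 * dest_x = 0 lands at unit_x1 = 700 + 0 - 640 = 60
			 * in per-unit coordinates.
			 */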
2478 */ 2479 if (clips) { 2480 dirty->fb_x = (s32) clips_ptr->x1; 2481 dirty->fb_y = (s32) clips_ptr->y1; 2482 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x - 2483 crtc_x; 2484 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y - 2485 crtc_y; 2486 } else { 2487 dirty->fb_x = vclips_ptr->x; 2488 dirty->fb_y = vclips_ptr->y; 2489 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w + 2490 dest_x - crtc_x; 2491 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h + 2492 dest_y - crtc_y; 2493 } 2494 2495 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x; 2496 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y; 2497 2498 /* Skip this clip if it's outside the crtc region */ 2499 if (dirty->unit_x1 >= crtc_width || 2500 dirty->unit_y1 >= crtc_height || 2501 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0) 2502 continue; 2503 2504 /* Clip right and bottom to crtc limits */ 2505 dirty->unit_x2 = min_t(s32, dirty->unit_x2, 2506 crtc_width); 2507 dirty->unit_y2 = min_t(s32, dirty->unit_y2, 2508 crtc_height); 2509 2510 /* Clip left and top to crtc limits */ 2511 clip_left = min_t(s32, dirty->unit_x1, 0); 2512 clip_top = min_t(s32, dirty->unit_y1, 0); 2513 dirty->unit_x1 -= clip_left; 2514 dirty->unit_y1 -= clip_top; 2515 dirty->fb_x -= clip_left; 2516 dirty->fb_y -= clip_top; 2517 2518 dirty->clip(dirty); 2519 } 2520 2521 dirty->fifo_commit(dirty); 2522 } 2523 2524 return 0; 2525 } 2526 2527 /** 2528 * vmw_kms_helper_validation_finish - Helper for post KMS command submission 2529 * cleanup and fencing 2530 * @dev_priv: Pointer to the device-private struct 2531 * @file_priv: Pointer identifying the client when user-space fencing is used 2532 * @ctx: Pointer to the validation context 2533 * @out_fence: If non-NULL, returned refcounted fence-pointer 2534 * @user_fence_rep: If non-NULL, pointer to user-space address area 2535 * in which to copy user-space fence info 2536 */ 2537 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, 2538 struct drm_file *file_priv, 2539 struct vmw_validation_context *ctx, 2540 struct vmw_fence_obj **out_fence, 2541 struct drm_vmw_fence_rep __user * 2542 user_fence_rep) 2543 { 2544 struct vmw_fence_obj *fence = NULL; 2545 uint32_t handle = 0; 2546 int ret = 0; 2547 2548 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2549 out_fence) 2550 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, 2551 file_priv ? &handle : NULL); 2552 vmw_validation_done(ctx, fence); 2553 if (file_priv) 2554 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), 2555 ret, user_fence_rep, fence, 2556 handle, -1, NULL); 2557 if (out_fence) 2558 *out_fence = fence; 2559 else 2560 vmw_fence_obj_unreference(&fence); 2561 } 2562 2563 /** 2564 * vmw_kms_update_proxy - Helper function to update a proxy surface from 2565 * its backing MOB. 2566 * 2567 * @res: Pointer to the surface resource 2568 * @clips: Clip rects in framebuffer (surface) space. 2569 * @num_clips: Number of clips in @clips. 2570 * @increment: Integer with which to increment the clip counter when looping. 2571 * Used to skip a predetermined number of clip rects. 2572 * 2573 * This function makes sure the proxy surface is updated from its backing MOB 2574 * using the region given by @clips. The surface resource @res and its backing 2575 * MOB needs to be reserved and validated on call. 
2576 */ 2577 int vmw_kms_update_proxy(struct vmw_resource *res, 2578 const struct drm_clip_rect *clips, 2579 unsigned num_clips, 2580 int increment) 2581 { 2582 struct vmw_private *dev_priv = res->dev_priv; 2583 struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size; 2584 struct { 2585 SVGA3dCmdHeader header; 2586 SVGA3dCmdUpdateGBImage body; 2587 } *cmd; 2588 SVGA3dBox *box; 2589 size_t copy_size = 0; 2590 int i; 2591 2592 if (!clips) 2593 return 0; 2594 2595 cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips); 2596 if (!cmd) 2597 return -ENOMEM; 2598 2599 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) { 2600 box = &cmd->body.box; 2601 2602 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; 2603 cmd->header.size = sizeof(cmd->body); 2604 cmd->body.image.sid = res->id; 2605 cmd->body.image.face = 0; 2606 cmd->body.image.mipmap = 0; 2607 2608 if (clips->x1 > size->width || clips->x2 > size->width || 2609 clips->y1 > size->height || clips->y2 > size->height) { 2610 DRM_ERROR("Invalid clips outsize of framebuffer.\n"); 2611 return -EINVAL; 2612 } 2613 2614 box->x = clips->x1; 2615 box->y = clips->y1; 2616 box->z = 0; 2617 box->w = clips->x2 - clips->x1; 2618 box->h = clips->y2 - clips->y1; 2619 box->d = 1; 2620 2621 copy_size += sizeof(*cmd); 2622 } 2623 2624 vmw_fifo_commit(dev_priv, copy_size); 2625 2626 return 0; 2627 } 2628 2629 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, 2630 unsigned unit, 2631 u32 max_width, 2632 u32 max_height, 2633 struct drm_connector **p_con, 2634 struct drm_crtc **p_crtc, 2635 struct drm_display_mode **p_mode) 2636 { 2637 struct drm_connector *con; 2638 struct vmw_display_unit *du; 2639 struct drm_display_mode *mode; 2640 int i = 0; 2641 int ret = 0; 2642 2643 mutex_lock(&dev_priv->dev->mode_config.mutex); 2644 list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, 2645 head) { 2646 if (i == unit) 2647 break; 2648 2649 ++i; 2650 } 2651 2652 if (i != unit) { 2653 DRM_ERROR("Could not find initial display unit.\n"); 2654 ret = -EINVAL; 2655 goto out_unlock; 2656 } 2657 2658 if (list_empty(&con->modes)) 2659 (void) vmw_du_connector_fill_modes(con, max_width, max_height); 2660 2661 if (list_empty(&con->modes)) { 2662 DRM_ERROR("Could not find initial display mode.\n"); 2663 ret = -EINVAL; 2664 goto out_unlock; 2665 } 2666 2667 du = vmw_connector_to_du(con); 2668 *p_con = con; 2669 *p_crtc = &du->crtc; 2670 2671 list_for_each_entry(mode, &con->modes, head) { 2672 if (mode->type & DRM_MODE_TYPE_PREFERRED) 2673 break; 2674 } 2675 2676 if (mode->type & DRM_MODE_TYPE_PREFERRED) 2677 *p_mode = mode; 2678 else { 2679 WARN_ONCE(true, "Could not find initial preferred mode.\n"); 2680 *p_mode = list_first_entry(&con->modes, 2681 struct drm_display_mode, 2682 head); 2683 } 2684 2685 out_unlock: 2686 mutex_unlock(&dev_priv->dev->mode_config.mutex); 2687 2688 return ret; 2689 } 2690 2691 /** 2692 * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement 2693 * property. 2694 * 2695 * @dev_priv: Pointer to a device private struct. 2696 * 2697 * Sets up the implicit placement property unless it's already set up. 
2698 */ 2699 void 2700 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv) 2701 { 2702 if (dev_priv->implicit_placement_property) 2703 return; 2704 2705 dev_priv->implicit_placement_property = 2706 drm_property_create_range(dev_priv->dev, 2707 DRM_MODE_PROP_IMMUTABLE, 2708 "implicit_placement", 0, 1); 2709 } 2710 2711 /** 2712 * vmw_kms_suspend - Save modesetting state and turn modesetting off. 2713 * 2714 * @dev: Pointer to the drm device 2715 * Return: 0 on success. Negative error code on failure. 2716 */ 2717 int vmw_kms_suspend(struct drm_device *dev) 2718 { 2719 struct vmw_private *dev_priv = vmw_priv(dev); 2720 2721 dev_priv->suspend_state = drm_atomic_helper_suspend(dev); 2722 if (IS_ERR(dev_priv->suspend_state)) { 2723 int ret = PTR_ERR(dev_priv->suspend_state); 2724 2725 DRM_ERROR("Failed kms suspend: %d\n", ret); 2726 dev_priv->suspend_state = NULL; 2727 2728 return ret; 2729 } 2730 2731 return 0; 2732 } 2733 2734 2735 /** 2736 * vmw_kms_resume - Re-enable modesetting and restore state 2737 * 2738 * @dev: Pointer to the drm device 2739 * Return: 0 on success. Negative error code on failure. 2740 * 2741 * State is resumed from a previous vmw_kms_suspend(). It's illegal 2742 * to call this function without a previous vmw_kms_suspend(). 2743 */ 2744 int vmw_kms_resume(struct drm_device *dev) 2745 { 2746 struct vmw_private *dev_priv = vmw_priv(dev); 2747 int ret; 2748 2749 if (WARN_ON(!dev_priv->suspend_state)) 2750 return 0; 2751 2752 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state); 2753 dev_priv->suspend_state = NULL; 2754 2755 return ret; 2756 } 2757 2758 /** 2759 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost 2760 * 2761 * @dev: Pointer to the drm device 2762 */ 2763 void vmw_kms_lost_device(struct drm_device *dev) 2764 { 2765 drm_atomic_helper_shutdown(dev); 2766 } 2767 2768 /** 2769 * vmw_du_helper_plane_update - Helper to do plane update on a display unit. 2770 * @update: The closure structure. 2771 * 2772 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane 2773 * update on display unit. 2774 * 2775 * Return: 0 on success or a negative error code on failure. 2776 */ 2777 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) 2778 { 2779 struct drm_plane_state *state = update->plane->state; 2780 struct drm_plane_state *old_state = update->old_state; 2781 struct drm_atomic_helper_damage_iter iter; 2782 struct drm_rect clip; 2783 struct drm_rect bb; 2784 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0); 2785 uint32_t reserved_size = 0; 2786 uint32_t submit_size = 0; 2787 uint32_t curr_size = 0; 2788 uint32_t num_hits = 0; 2789 void *cmd_start; 2790 char *cmd_next; 2791 int ret; 2792 2793 /* 2794 * Iterate in advance to check if really need plane update and find the 2795 * number of clips that actually are in plane src for fifo allocation. 
2796 */ 2797 drm_atomic_helper_damage_iter_init(&iter, old_state, state); 2798 drm_atomic_for_each_plane_damage(&iter, &clip) 2799 num_hits++; 2800 2801 if (num_hits == 0) 2802 return 0; 2803 2804 if (update->vfb->bo) { 2805 struct vmw_framebuffer_bo *vfbbo = 2806 container_of(update->vfb, typeof(*vfbbo), base); 2807 2808 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false, 2809 update->cpu_blit); 2810 } else { 2811 struct vmw_framebuffer_surface *vfbs = 2812 container_of(update->vfb, typeof(*vfbs), base); 2813 2814 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res, 2815 0, VMW_RES_DIRTY_NONE, NULL, 2816 NULL); 2817 } 2818 2819 if (ret) 2820 return ret; 2821 2822 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr); 2823 if (ret) 2824 goto out_unref; 2825 2826 reserved_size = update->calc_fifo_size(update, num_hits); 2827 cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size); 2828 if (!cmd_start) { 2829 ret = -ENOMEM; 2830 goto out_revert; 2831 } 2832 2833 cmd_next = cmd_start; 2834 2835 if (update->post_prepare) { 2836 curr_size = update->post_prepare(update, cmd_next); 2837 cmd_next += curr_size; 2838 submit_size += curr_size; 2839 } 2840 2841 if (update->pre_clip) { 2842 curr_size = update->pre_clip(update, cmd_next, num_hits); 2843 cmd_next += curr_size; 2844 submit_size += curr_size; 2845 } 2846 2847 bb.x1 = INT_MAX; 2848 bb.y1 = INT_MAX; 2849 bb.x2 = INT_MIN; 2850 bb.y2 = INT_MIN; 2851 2852 drm_atomic_helper_damage_iter_init(&iter, old_state, state); 2853 drm_atomic_for_each_plane_damage(&iter, &clip) { 2854 uint32_t fb_x = clip.x1; 2855 uint32_t fb_y = clip.y1; 2856 2857 vmw_du_translate_to_crtc(state, &clip); 2858 if (update->clip) { 2859 curr_size = update->clip(update, cmd_next, &clip, fb_x, 2860 fb_y); 2861 cmd_next += curr_size; 2862 submit_size += curr_size; 2863 } 2864 bb.x1 = min_t(int, bb.x1, clip.x1); 2865 bb.y1 = min_t(int, bb.y1, clip.y1); 2866 bb.x2 = max_t(int, bb.x2, clip.x2); 2867 bb.y2 = max_t(int, bb.y2, clip.y2); 2868 } 2869 2870 curr_size = update->post_clip(update, cmd_next, &bb); 2871 submit_size += curr_size; 2872 2873 if (reserved_size < submit_size) 2874 submit_size = 0; 2875 2876 vmw_fifo_commit(update->dev_priv, submit_size); 2877 2878 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx, 2879 update->out_fence, NULL); 2880 return ret; 2881 2882 out_revert: 2883 vmw_validation_revert(&val_ctx); 2884 2885 out_unref: 2886 vmw_validation_unref_lists(&val_ctx); 2887 return ret; 2888 } 2889