/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>


/* Might need an hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	drm_plane_cleanup(&du->primary);
	drm_plane_cleanup(&du->cursor);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_cursor_update_image(struct vmw_private *dev_priv,
				   u32 *image, u32 width, u32 height,
				   u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_fifo_commit_flush(dev_priv, cmd_size);

	return 0;
}

static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *dmabuf,
				    u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width * height * 4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

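	/*
	 * The cursor image is now CPU-visible; hand the raw ARGB data
	 * to the FIFO define-cursor path. (&dummy receives ttm's
	 * is_iomem flag, which is not needed here.)
	 */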
	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}


static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	spin_unlock(&dev_priv->cursor_lock);
}


void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never be != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/*
		 * The snooper image is a u32 pointer, so i * 64 steps
		 * one 64-pixel row; copy row by row, honoring the
		 * guest pitch.
		 */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
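
/*
 * Two hotspot sources are tracked per display unit: hotspot_x/y, set
 * through the cursor bypass ioctl, and core_hotspot_x/y, maintained by
 * the cursor update paths. Where both apply they are added together,
 * see vmw_kms_cursor_post_execbuf() below.
 */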

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64,
					du->hotspot_x + du->core_hotspot_x,
					du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}


/**
 * vmw_du_cursor_plane_update() - Update cursor image and location
 *
 * @plane: plane object to update
 * @crtc: owning CRTC of @plane
 * @fb: framebuffer to flip onto plane
 * @crtc_x: x offset of plane on crtc
 * @crtc_y: y offset of plane on crtc
 * @crtc_w: width of plane rectangle on crtc
 * @crtc_h: height of plane rectangle on crtc
 * @src_x: Not used
 * @src_y: Not used
 * @src_w: Not used
 * @src_h: Not used
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int vmw_du_cursor_plane_update(struct drm_plane *plane,
			       struct drm_crtc *crtc,
			       struct drm_framebuffer *fb,
			       int crtc_x, int crtc_y,
			       unsigned int crtc_w,
			       unsigned int crtc_h,
			       uint32_t src_x, uint32_t src_y,
			       uint32_t src_w, uint32_t src_h)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	s32 hotspot_x, hotspot_y;
	int ret;

	hotspot_x = du->hotspot_x + fb->hot_x;
	hotspot_y = du->hotspot_y + fb->hot_y;

	/* A lot of the code assumes this */
	if (crtc_w != 64 || crtc_h != 64) {
		ret = -EINVAL;
		goto out;
	}

	if (vmw_framebuffer_to_vfb(fb)->dmabuf)
		dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
	else
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
		goto out;
	}

	/* setup new image */
	ret = 0;
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		goto out;
	}

	if (!ret) {
		du->cursor_x = crtc_x + du->set_gui_x;
		du->cursor_y = crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
	}

out:
	return ret;
}
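

/**
 * vmw_du_cursor_plane_disable - Drop the framebuffer bound to the cursor plane
 *
 * @plane: cursor plane to disable
 *
 * Releases the framebuffer reference held by @plane, if any. Always
 * returns -EINVAL.
 */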
int vmw_du_cursor_plane_disable(struct drm_plane *plane)
{
	if (plane->fb) {
		drm_framebuffer_unreference(plane->fb);
		plane->fb = NULL;
	}

	return -EINVAL;
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);


	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->dmabuf)
		vmw_dmabuf_unreference(&vps->dmabuf);

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
			vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_dmabuf_reference(vps->dmabuf);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	return 0;
}


void
vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);

	drm_atomic_set_fb_for_plane(plane->state, NULL);
	vmw_cursor_update_position(dev_priv, false, 0, 0);
}
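

/*
 * Pushes the cursor image from the snooped surface or from the backing
 * dma buffer to the device, then shows the cursor at its new position,
 * or hides it when no backing object is bound.
 */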
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
	s32 hotspot_x, hotspot_y;
	int ret = 0;


	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;
	du->cursor_surface = vps->surf;
	du->cursor_dmabuf = vps->dmabuf;

	/* setup new image */
	if (vps->surf) {
		du->cursor_age = du->cursor_surface->snooper.age;

		ret = vmw_cursor_update_image(dev_priv,
					      vps->surf->snooper.image,
					      64, 64, hotspot_x, hotspot_y);
	} else if (vps->dmabuf) {
		ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
					       plane->state->crtc_w,
					       plane->state->crtc_h,
					       hotspot_x, hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	if (!ret) {
		du->cursor_x = plane->state->crtc_x + du->set_gui_x;
		du->cursor_y = plane->state->crtc_y + du->set_gui_y;

		vmw_cursor_update_position(dev_priv, true,
					   du->cursor_x + hotspot_x,
					   du->cursor_y + hotspot_y);
	} else {
		DRM_ERROR("Failed to update cursor image\n");
	}
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Beyond
 * what the atomic helper checks, we care about the CRTC rectangle
 * fitting inside the FB and about keeping a single active implicit
 * framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_plane_state *state)
{
	struct drm_framebuffer *new_fb = state->fb;
	bool visible;

	struct drm_rect src = {
		.x1 = state->src_x,
		.y1 = state->src_y,
		.x2 = state->src_x + state->src_w,
		.y2 = state->src_y + state->src_h,
	};
	struct drm_rect dest = {
		.x1 = state->crtc_x,
		.y1 = state->crtc_y,
		.x2 = state->crtc_x + state->crtc_w,
		.y2 = state->crtc_y + state->crtc_h,
	};
	struct drm_rect clip = dest;
	int ret;

	ret = drm_plane_helper_check_update(plane, state->crtc, new_fb,
					    &src, &dest, &clip,
					    DRM_ROTATE_0,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false, true, &visible);


	if (!ret && new_fb) {
		struct drm_crtc *crtc = state->crtc;
		struct vmw_connector_state *vcs;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
		struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);

		vcs = vmw_connector_state_to_vcs(du->connector.state);

		if (dest.x2 > new_fb->width ||
		    dest.y2 > new_fb->height) {
			DRM_ERROR("CRTC area outside of framebuffer\n");
			return -EINVAL;
		}

		/* Only one active implicit framebuffer at a time. */
		mutex_lock(&dev_priv->global_kms_state_mutex);
		if (vcs->is_implicit && dev_priv->implicit_fb &&
		    !(dev_priv->num_implicit == 1 && du->active_implicit) &&
		    dev_priv->implicit_fb != vfb) {
			DRM_ERROR("Multiple implicit framebuffers "
				  "not supported.\n");
			ret = -EINVAL;
		}
		mutex_unlock(&dev_priv->global_kms_state_mutex);
	}


	return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @new_state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_plane_state *new_state)
{
	int ret = 0;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;


	/* Turning off */
	if (!fb)
		return ret;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		ret = -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		ret = -EINVAL;
	}

	return ret;
}


int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *new_state)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = 1 << drm_connector_index(&du->connector);
	bool has_primary = new_state->plane_mask &
			   BIT(drm_plane_index(crtc->primary));

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;


	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}

}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}
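

/*
 * The vmw crtc/plane/connector states embed the DRM core state as their
 * first member, so the reset, duplicate and destroy hooks below can
 * kzalloc()/kmemdup() the subclassed state and hand the embedded base
 * back to the DRM core.
 */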

/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;


	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	crtc->state = &vcs->base;
	crtc->state->crtc = crtc;
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);

	if (!vps)
		return NULL;

	vps->pinned = 0;

	/* Mapping is managed by prepare_fb/cleanup_fb */
	memset(&vps->guest_map, 0, sizeof(vps->guest_map));
	memset(&vps->host_map, 0, sizeof(vps->host_map));
	vps->cpp = 0;

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->dmabuf)
		(void) vmw_dmabuf_reference(vps->dmabuf);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;


	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);

	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	plane->state = &vps->base;
	plane->state->plane = plane;
	plane->state->rotation = DRM_ROTATE_0;
}
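

/*
 * vmw_du_plane_duplicate_state() takes its own references on the backing
 * surface or dma buffer, and vmw_du_plane_destroy_state() below drops
 * them, so every plane state owns its backing objects independently.
 */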

/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);


	/* Should have been freed by cleanup_fb */
	if (vps->guest_map.virtual) {
		DRM_ERROR("Guest mapping not freed\n");
		ttm_bo_kunmap(&vps->guest_map);
	}

	if (vps->host_map.virtual) {
		DRM_ERROR("Host mapping not freed\n");
		ttm_bo_kunmap(&vps->host_map);
	}

	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->dmabuf)
		vmw_dmabuf_unreference(&vps->dmabuf);

	drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);

	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;


	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);

	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}

/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	if (vfbs->base.user_obj)
		ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
					 struct drm_file *file_priv,
					 unsigned flags, unsigned color,
					 struct drm_clip_rect *clips,
					 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Legacy Display Unit does not support 3D */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -EINVAL;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
						   clips, NULL, NULL, 0, 0,
						   num_clips, inc, NULL);
	else
		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
						 clips, NULL, NULL, 0, 0,
						 num_clips, inc, NULL);

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}
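
/*
 * Note that with DRM_MODE_FB_DIRTY_ANNOTATE_COPY the clip rects arrive
 * as copy pairs; the dirty callbacks above and below therefore halve
 * num_clips and walk the array with a stride of two to skip the source
 * rects.
 */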

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips);
	case vmw_du_screen_target:
		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
					user_fence_rep, NULL, vclips, num_clips,
					1, false, true);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_dmabuf_proxy)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;
	struct drm_format_name_buf format_name;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->base_size.width < mode_cmd->width ||
		     surface->base_size.height < mode_cmd->height ||
		     surface->base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!dev_priv->has_dx && format != surface->format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_dmabuf_proxy = is_dmabuf_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	if (vfbd->base.user_obj)
		ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(dev_priv->dev);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0)) {
		drm_modeset_unlock_all(dev_priv->dev);
		return ret;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
				       clips, NULL, num_clips, increment,
				       true, true);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
						  clips, NULL, num_clips,
						  increment, true, NULL);
		break;
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
						  clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_fifo_flush(dev_priv, false);
	ttm_read_unlock(&dev_priv->reservation_sem);

	drm_modeset_unlock_all(dev_priv->dev);

	return ret;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
};
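
/*
 * Placement of the framebuffer backing store depends on the active
 * display unit: the legacy unit scans out from the start of VRAM, while
 * screen objects and screen targets pin in VRAM-or-GMR, or, for
 * surface-backed framebuffers, in MOB placement.
 */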

/**
 * vmw_framebuffer_pin - Pin the buffer backing a framebuffer
 *
 * @vfb: framebuffer whose backing buffer should be pinned
 *
 * For the legacy display unit the buffer is pinned at the start of VRAM.
 */
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;
	int ret;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (!buf)
		return 0;

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		vmw_overlay_pause_all(dev_priv);
		ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
		vmw_overlay_resume_all(dev_priv);
		break;
	case vmw_du_screen_object:
	case vmw_du_screen_target:
		if (vfb->dmabuf)
			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
							     false);

		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
						   &vmw_mob_placement, false);
	default:
		return -EINVAL;
	}

	return ret;
}

static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_dma_buffer *buf;

	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;

	if (WARN_ON(!buf))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, buf, false);
}

/**
 * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @dmabuf_mob: MOB backing the DMA buf
 * @srf_out: newly created surface
 *
 * When the content FB is a DMA buf, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA,
 * which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_dmabuf_proxy(struct drm_device *dev,
				   const struct drm_mode_fb_cmd2 *mode_cmd,
				   struct vmw_dma_buffer *dmabuf_mob,
				   struct vmw_surface **srf_out)
{
	uint32_t format;
	struct drm_vmw_size content_base_size = {0};
	struct vmw_resource *res;
	unsigned int bytes_pp;
	struct drm_format_name_buf format_name;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		/* 8-bit palettized */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %s\n",
			  drm_get_format_name(mode_cmd->pixel_format, &format_name));
		return -EINVAL;
	}

	content_base_size.width  = mode_cmd->pitches[0] / bytes_pp;
	content_base_size.height = mode_cmd->height;
	content_base_size.depth  = 1;

	ret = vmw_surface_gb_priv_define(dev,
					 0, /* kernel visible only */
					 0, /* flags */
					 format,
					 true, /* can be a scanout buffer */
					 1, /* num of mip levels */
					 0,
					 0,
					 content_base_size,
					 srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_dmabuf_unreference(&res->backup);
	res->backup = vmw_dmabuf_reference(dmabuf_mob);
	res->backup_offset = 0;
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}



static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd2
					  *mode_cmd)

{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	struct drm_format_name_buf format_name;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->active_display_unit == vmw_du_screen_object) {
		switch (mode_cmd->pixel_format) {
		case DRM_FORMAT_XRGB8888:
		case DRM_FORMAT_ARGB8888:
			break;
		case DRM_FORMAT_XRGB1555:
		case DRM_FORMAT_RGB565:
			break;
		default:
			DRM_ERROR("Invalid pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format, &format_name));
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.dmabuf = true;
	vfbd->buffer = vmw_dmabuf_reference(dmabuf);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_dmabuf_unreference(&dmabuf);
	kfree(vfbd);
out_err1:
	return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @dmabuf or @surface must be NULL.
 * @only_2d: No presents will occur to this dma buffer based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_dma_buffer *dmabuf,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_dmabuf_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM,
	 * therefore, wrap the DMA buf in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    dmabuf && only_2d &&
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
					      dmabuf, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_dmabuf_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_dmabuf_proxy);

		/*
		 * vmw_create_dmabuf_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_dmabuf_proxy)
			vmw_surface_unreference(&surface);
	} else if (dmabuf) {
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
						     mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	vfb->pin = vmw_framebuffer_pin;
	vfb->unpin = vmw_framebuffer_unpin;

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	int ret;

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd->pitches[0],
					mode_cmd->height)) {
		DRM_ERROR("Requested mode exceeds bounding box limit.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/*
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret)
		goto err_out;


	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}


	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}



/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check_modeset() for
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message
 *
 * RETURNS:
 * Zero for success or -errno
 */
int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	struct vmw_private *dev_priv = vmw_priv(dev);
	int i;


	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		unsigned long requested_bb_mem = 0;

		if (dev_priv->active_display_unit == vmw_du_screen_target) {
			if (crtc->primary->fb) {
				int cpp = crtc->primary->fb->pitches[0] /
					  crtc->primary->fb->width;

				requested_bb_mem += crtc->mode.hdisplay * cpp *
						    crtc->mode.vdisplay;
			}

			if (requested_bb_mem > dev_priv->prim_bb_mem)
				return -EINVAL;
		}
	}

	return drm_atomic_helper_check(dev, state);
}


static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL);
}
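

/*
 * vmw_kms_present - Dispatch a present to the implementation matching
 * the active display unit (screen targets or screen objects) and flush
 * the FIFO afterwards. Any other display unit hits the default case and
 * returns -ENOSYS.
 */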
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_fifo_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(dev_priv->dev,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);

	if (!dev_priv->hotplug_mode_update_property)
		return;
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret;

	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->active_display_unit == vmw_du_screen_object)
		ret = vmw_kms_sou_close_display(dev_priv);
	else if (dev_priv->active_display_unit == vmw_du_screen_target)
		ret = vmw_kms_stdu_close_display(dev_priv);
	else
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;


	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
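
/*
 * Program the host's framebuffer layout registers: width, height and
 * bits per pixel, plus the pitch through the PITCHLOCK register or FIFO
 * field where available, then verify that the host accepted the
 * resulting depth.
 */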
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(pitch, vmw_priv->mmio_virt +
			       SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
							SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_mmio_write(vmw_priv->vga_pitchlock,
			       vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
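
/*
 * Returns true when a framebuffer with the given pitch and height fits
 * within the scanout memory limit: prim_bb_mem for screen targets,
 * otherwise the VRAM size.
 */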
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->prim_bb_mem : dev_priv->vram_size);
}


/*
 * Called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	return 0;
}

/*
 * Called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	return -ENOSYS;
}

/*
 * Called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
}


/*
 * Small shared kms functions.
 */

static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
				struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_x_property,
				 du->gui_x);
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_y_property,
				 du->gui_y);
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_x_property,
				 0);
			drm_object_property_set_value
				(&con->base, dev->mode_config.suggested_y_property,
				 0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);
	drm_sysfs_hotplug_event(dev);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}
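
/*
 * A connector reports "connected" when its unit index lies below the
 * number of displays currently exposed by the host and the unit has
 * been marked preferred-active by the layout code above.
 */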
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603,
		   1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_width = min(max_width, dev_priv->texture_max_width);

		max_height = min(max_height, dev_priv->stdu_max_height);
		max_height = min(max_height, dev_priv->texture_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode might be null here, this is intended */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	drm_mode_connector_list_update(connector);
	/* Move the preferred mode first; helps apps pick the right mode.
	 */
	drm_mode_sort(&connector->modes);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct vmw_private *dev_priv = vmw_priv(connector->dev);

	if (property == dev_priv->implicit_placement_property)
		du->is_implicit = val;

	return 0;
}


/**
 * vmw_du_connector_atomic_set_property - Atomic version of set property
 *
 * @connector: connector the property is associated with
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
vmw_du_connector_atomic_set_property(struct drm_connector *connector,
				     struct drm_connector_state *state,
				     struct drm_property *property,
				     uint64_t val)
{
	struct vmw_private *dev_priv = vmw_priv(connector->dev);
	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);


	if (property == dev_priv->implicit_placement_property) {
		vcs->is_implicit = val;

		/*
		 * We should really be doing a drm_atomic_commit() to
		 * commit the new state, but since this doesn't cause
		 * an immediate state change, this is probably ok.
		 */
		du->is_implicit = vcs->is_implicit;
	} else {
		return -EINVAL;
	}

	return 0;
}


/**
 * vmw_du_connector_atomic_get_property - Atomic version of get property
 *
 * @connector: connector the property is associated with
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
vmw_du_connector_atomic_get_property(struct drm_connector *connector,
				     const struct drm_connector_state *state,
				     struct drm_property *property,
				     uint64_t *val)
{
	struct vmw_private *dev_priv = vmw_priv(connector->dev);
	struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);

	if (property == dev_priv->implicit_placement_property)
		*val = vcs->is_implicit;
	else {
		DRM_ERROR("Invalid Property %s\n", property->name);
		return -EINVAL;
	}

	return 0;
}


int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	u64 total_pixels = 0;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_rect bounding_box = {0};

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w >
		    mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}

		/*
		 * bounding_box.w and bounding_box.h are used as
		 * lower-right coordinates
		 */
		if (rects[i].x + rects[i].w > bounding_box.w)
			bounding_box.w = rects[i].x + rects[i].w;

		if (rects[i].y + rects[i].h > bounding_box.h)
			bounding_box.h = rects[i].y + rects[i].h;

		total_pixels += (u64) rects[i].w * (u64) rects[i].h;
	}

	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		/*
		 * For Screen Targets, the limits for a topology are:
		 * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
		 * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
		 */
		u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
		u64 pixel_mem = total_pixels * 4;

		if (bb_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Topology is beyond supported limits.\n");
			ret = -EINVAL;
			goto out_free;
		}

		if (pixel_mem > dev_priv->prim_bb_mem) {
			DRM_ERROR("Combined output size too large\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
	return ret;
}

/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
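 *
 * As implemented below, the helper loops over all display units currently
 * scanning out from @framebuffer, optionally reserves
 * @dirty->fifo_reserve_size bytes of fifo space per unit, translates each
 * cliprect into the unit's coordinate space, drops rects that miss the unit
 * entirely, and calls @dirty->clip for each remaining rect, followed by one
 * @dirty->fifo_commit call per unit.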
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->primary->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = vmw_fifo_reserve(dev_priv,
						      dirty->fifo_reserve_size);
			if (!dirty->cmd) {
				DRM_ERROR("Couldn't reserve fifo space "
					  "for dirty blits.\n");
				return -ENOMEM;
			}
			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select clip array type. Note that integer type
			 * in @clips is unsigned short, whereas in @vclips
			 * it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}

/**
 * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
 * command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buf: The buffer object.
 * @interruptible: Whether to perform waits as interruptible.
 * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
 * the buffer will be validated as a GMR. Already pinned buffers will not be
 * validated.
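 *
 * On success the buffer is left reserved; the caller is expected to pair this
 * call with vmw_kms_helper_buffer_finish(), or with
 * vmw_kms_helper_buffer_revert() on error.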
 *
 * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
 * interrupted by a signal.
 */
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible,
				  bool validate_as_mob)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ttm_bo_reserve(bo, false, false, NULL);
	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
					 validate_as_mob);
	if (ret)
		ttm_bo_unreserve(bo);

	return ret;
}

/**
 * vmw_kms_helper_buffer_revert - Undo the actions of
 * vmw_kms_helper_buffer_prepare.
 *
 * @buf: Pointer to the buffer object.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_buffer_prepare.
 */
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
{
	if (buf)
		ttm_bo_unreserve(&buf->base);
}

/**
 * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
 * kms command submission.
 *
 * @dev_priv: Pointer to a device private structure.
 * @file_priv: Pointer to a struct drm_file representing the caller's
 * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
 * if non-NULL, @user_fence_rep must be non-NULL.
 * @buf: The buffer object.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 * @user_fence_rep: Optional pointer to a user-space provided struct
 * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
 * function copies fence data to user-space in a fail-safe manner.
 */
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
				  struct drm_file *file_priv,
				  struct vmw_dma_buffer *buf,
				  struct vmw_fence_obj **out_fence,
				  struct drm_vmw_fence_rep __user *
				  user_fence_rep)
{
	struct vmw_fence_obj *fence;
	uint32_t handle;
	int ret;

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 file_priv ? &handle : NULL);
	if (buf)
		vmw_fence_single_bo(&buf->base, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);

	vmw_kms_helper_buffer_revert(buf);
}


/**
 * vmw_kms_helper_resource_revert - Undo the actions of
 * vmw_kms_helper_resource_prepare.
 *
 * @res: Pointer to the resource. Typically a surface.
 *
 * Helper to be used if an error forces the caller to undo the actions of
 * vmw_kms_helper_resource_prepare.
 */
void vmw_kms_helper_resource_revert(struct vmw_resource *res)
{
	vmw_kms_helper_buffer_revert(res->backup);
	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
 * command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @interruptible: Whether to perform waits as interruptible.
 *
 * Also reserves and validates the backup buffer if the resource is
 * guest-backed. Returns 0 on success, negative error code on failure;
 * -ERESTARTSYS if interrupted by a signal.
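 *
 * A typical call sequence, shown as a sketch with error handling elided, is:
 *
 *	ret = vmw_kms_helper_resource_prepare(res, true);
 *	if (ret)
 *		return ret;
 *	... build and submit fifo commands referencing @res ...
 *	vmw_kms_helper_resource_finish(res, NULL);
 *
 * with vmw_kms_helper_resource_revert() taking the place of _finish() if
 * command construction fails.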
 */
int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
				    bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
	else
		mutex_lock(&res->dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_unlock;

	if (res->backup) {
		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
						    interruptible,
						    res->dev_priv->has_mob);
		if (ret)
			goto out_unreserve;
	}
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_revert;
	return 0;

out_revert:
	vmw_kms_helper_buffer_revert(res->backup);
out_unreserve:
	vmw_resource_unreserve(res, false, NULL, 0);
out_unlock:
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
	return ret;
}

/**
 * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
 * kms command submission.
 *
 * @res: Pointer to the resource. Typically a surface.
 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
 * ref-counted fence pointer is returned here.
 */
void vmw_kms_helper_resource_finish(struct vmw_resource *res,
				    struct vmw_fence_obj **out_fence)
{
	if (res->backup || out_fence)
		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
					     out_fence, NULL);

	vmw_resource_unreserve(res, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}

/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
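 *
 * As a sizing note, one SVGA_3D_CMD_UPDATE_GB_IMAGE command is emitted per
 * clip rect, so the fifo reservation below scales linearly with @num_clips.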
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd) {
		DRM_ERROR("Couldn't reserve fifo space for proxy surface "
			  "update.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			/* Commit what was built so far to release the
			 * fifo space reserved above before bailing out.
			 */
			vmw_fifo_commit(dev_priv, copy_size);
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_fifo_commit(dev_priv, copy_size);

	return 0;
}

int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
			    unsigned unit,
			    u32 max_width,
			    u32 max_height,
			    struct drm_connector **p_con,
			    struct drm_crtc **p_crtc,
			    struct drm_display_mode **p_mode)
{
	struct drm_connector *con;
	struct vmw_display_unit *du;
	struct drm_display_mode *mode;
	int i = 0;

	list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
			    head) {
		if (i == unit)
			break;

		++i;
	}

	if (i != unit) {
		DRM_ERROR("Could not find initial display unit.\n");
		return -EINVAL;
	}

	if (list_empty(&con->modes))
		(void) vmw_du_connector_fill_modes(con, max_width, max_height);

	if (list_empty(&con->modes)) {
		DRM_ERROR("Could not find initial display mode.\n");
		return -EINVAL;
	}

	du = vmw_connector_to_du(con);
	*p_con = con;
	*p_crtc = &du->crtc;

	list_for_each_entry(mode, &con->modes, head) {
		if (mode->type & DRM_MODE_TYPE_PREFERRED)
			break;
	}

	if (mode->type & DRM_MODE_TYPE_PREFERRED)
		*p_mode = mode;
	else {
		WARN_ONCE(true, "Could not find initial preferred mode.\n");
		*p_mode = list_first_entry(&con->modes,
					   struct drm_display_mode,
					   head);
	}

	return 0;
}

/**
 * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 */
void vmw_kms_del_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (du->active_implicit) {
		if (--(dev_priv->num_implicit) == 0)
			dev_priv->implicit_fb = NULL;
		du->active_implicit = false;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
 *
 * @dev_priv: Pointer to a device private struct.
 * @du: The display unit of the crtc.
 * @vfb: The implicit framebuffer
 *
 * Registers a binding to an implicit framebuffer.
 */
void vmw_kms_add_active(struct vmw_private *dev_priv,
			struct vmw_display_unit *du,
			struct vmw_framebuffer *vfb)
{
	mutex_lock(&dev_priv->global_kms_state_mutex);
	WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);

	if (!du->active_implicit && du->is_implicit) {
		dev_priv->implicit_fb = vfb;
		du->active_implicit = true;
		dev_priv->num_implicit++;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_crtc_flippable - Check whether we can page-flip a crtc.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc we want to flip.
 *
 * Returns true or false depending on whether it's OK to flip this crtc
 * based on the criterion that we must not have more than one implicit
 * frame-buffer at any one time.
 */
bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
			    struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool ret;

	mutex_lock(&dev_priv->global_kms_state_mutex);
	ret = !du->is_implicit || dev_priv->num_implicit == 1;
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	return ret;
}

/**
 * vmw_kms_update_implicit_fb - Update the implicit fb.
 *
 * @dev_priv: Pointer to device-private struct.
 * @crtc: The crtc the new implicit frame-buffer is bound to.
 */
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
				struct drm_crtc *crtc)
{
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_framebuffer *vfb;

	mutex_lock(&dev_priv->global_kms_state_mutex);

	if (!du->is_implicit)
		goto out_unlock;

	vfb = vmw_framebuffer_to_vfb(crtc->primary->fb);
	WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
		     dev_priv->implicit_fb != vfb);

	dev_priv->implicit_fb = vfb;
out_unlock:
	mutex_unlock(&dev_priv->global_kms_state_mutex);
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 * @immutable: Whether the property is immutable.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
					   bool immutable)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(dev_priv->dev,
					  immutable ?
					  DRM_MODE_PROP_IMMUTABLE : 0,
					  "implicit_placement", 0, 1);
}


/**
 * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
 *
 * @set: The configuration to set.
 *
 * The vmwgfx Xorg driver doesn't assign the mode::type member, which,
 * when drm_mode_set_crtcinfo() is called as part of applying the
 * configuration, causes incorrect crtc dimensions to be computed and
 * severe problems in the vmwgfx modesetting. So explicitly clear that
 * member before calling into drm_atomic_helper_set_config().
 */
int vmw_kms_set_config(struct drm_mode_set *set,
		       struct drm_modeset_acquire_ctx *ctx)
{
	if (set && set->mode)
		set->mode->type = 0;

	return drm_atomic_helper_set_config(set, ctx);
}
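
/*
 * Usage note (illustrative sketch, not a definitive wiring): each
 * display-unit implementation is expected to install this wrapper as its
 * drm_crtc_funcs .set_config hook, along the lines of
 *
 *	static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
 *		...
 *		.set_config = vmw_kms_set_config,
 *	};
 *
 * where the exact funcs struct name is per display unit and is assumed
 * here for illustration only.
 */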