/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>


#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)

/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference in X between source clip rects and framebuffer coordinates.
 * @dst_y: Difference in Y between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};

/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};


/*
 * Other structs.
 */

struct vmw_screen_object_display {
	unsigned num_implicit;

	struct vmw_framebuffer *implicit_fb;
	SVGAFifoCmdDefineGMRFB cur;
	struct vmw_dma_buffer *pinned_gmrfb;
};

/**
 * struct vmw_screen_object_unit - Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined;
	bool active_implicit;
};

static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}


/*
 * Screen Object Display Unit CRTC functions
 */

static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

static void vmw_sou_del_active(struct vmw_private *vmw_priv,
			       struct vmw_screen_object_unit *sou)
{
	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;

	if (sou->active_implicit) {
		if (--(ld->num_implicit) == 0)
			ld->implicit_fb = NULL;
		sou->active_implicit = false;
	}
}

static void vmw_sou_add_active(struct vmw_private *vmw_priv,
			       struct vmw_screen_object_unit *sou,
			       struct vmw_framebuffer *vfb)
{
	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;

	BUG_ON(!ld->num_implicit && ld->implicit_fb);

	if (!sou->active_implicit && sou->base.is_implicit) {
		ld->implicit_fb = vfb;
		sou->active_implicit = true;
		ld->num_implicit++;
	}
}

/**
 * vmw_sou_fifo_create - Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}

/**
 * vmw_sou_fifo_destroy - Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW");
	else
		sou->defined = false;

	return ret;
}

/**
 * vmw_sou_backing_free - Free the backing store.
 */
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou)
{
	vmw_dmabuf_unreference(&sou->buffer);
	sou->buffer_size = 0;
}

/**
 * vmw_sou_backing_alloc - Allocate the backing store for the buffer.
 */
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou,
				 unsigned long size)
{
	int ret;

	if (sou->buffer_size == size)
		return 0;

	if (sou->buffer)
		vmw_sou_backing_free(dev_priv, sou);

	sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
	if (unlikely(sou->buffer == NULL))
		return -ENOMEM;

	/* After we have allocated the backing store, we might not be able
	 * to resume the overlays; this is preferred to failing the
	 * allocation.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	if (unlikely(ret != 0))
		sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
	else
		sou->buffer_size = size;

	return ret;
}

static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;
	int ret = 0;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the sou */
	crtc = set->crtc;
	sou = vmw_crtc_to_sou(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	if (set->num_connectors > 1) {
		DRM_ERROR("Too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &sou->base.connector) {
		DRM_ERROR("Connector doesn't match %p %p\n",
			set->connectors[0], &sou->base.connector);
		return -EINVAL;
	}

	/* a sou only supports one active fb at a time */
	if (sou->base.is_implicit &&
	    dev_priv->sou_priv->implicit_fb && vfb &&
	    !(dev_priv->sou_priv->num_implicit == 1 &&
	      sou->active_implicit) &&
	    dev_priv->sou_priv->implicit_fb != vfb) {
		DRM_ERROR("Multiple framebuffers not supported\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &sou->base.connector;
	encoder = &sou->base.encoder;

	/* should we turn the crtc off? */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* the hardware has hung, don't do anything more */
		if (unlikely(ret != 0))
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->primary->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;
		crtc->enabled = false;

		vmw_sou_del_active(dev_priv, sou);

		vmw_sou_backing_free(dev_priv, sou);

		return 0;
	}


	/* we now know we want to set a mode */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_svga_enable(dev_priv);

	if (mode->hdisplay != crtc->mode.hdisplay ||
	    mode->vdisplay != crtc->mode.vdisplay) {
		/* no need to check if depth is different, because the
		 * backing store is forced to 4 bytes per pixel by the device.
		 */

		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* the hardware has hung, don't do anything more */
		if (unlikely(ret != 0))
			return ret;

		vmw_sou_backing_free(dev_priv, sou);
	}

	if (!sou->buffer) {
		/* forced to 4 bytes per pixel by the device */
		size_t size = mode->hdisplay * mode->vdisplay * 4;
		ret = vmw_sou_backing_alloc(dev_priv, sou, size);
		if (unlikely(ret != 0))
			return ret;
	}

	ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
	if (unlikely(ret != 0)) {
		/*
		 * We are in a bit of a situation here: the hardware has
		 * hung and we may or may not have a buffer hanging off
		 * the screen object. The best thing to do is to leave
		 * things alone if the screen is still defined; if not,
		 * just turn the crtc off. Not what userspace wants, but
		 * it will have to cope.
		 */
		if (sou->defined)
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->primary->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;
		crtc->enabled = false;

		return ret;
	}

	vmw_sou_add_active(dev_priv, sou, vfb);

	connector->encoder = encoder;
	encoder->crtc = crtc;
	crtc->mode = *mode;
	crtc->primary->fb = fb;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->enabled = true;

	return 0;
}

/**
 * vmw_sou_screen_object_flippable - Returns whether this unit can be
 * page flipped. Must be called with the mode_config mutex held.
 */
static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
					    struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	if (!sou->base.is_implicit)
		return true;

	if (dev_priv->sou_priv->num_implicit != 1)
		return false;

	return true;
}

/**
 * vmw_sou_update_implicit_fb - Update the implicit fb to the current fb
 * of this crtc. Must be called with the mode_config mutex held.
 */
static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
				       struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	BUG_ON(!sou->base.is_implicit);

	dev_priv->sou_priv->implicit_fb =
		vmw_framebuffer_to_vfb(sou->base.crtc.primary->fb);
}

static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;

	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	if (!vmw_sou_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->primary->fb = fb;

	/* do a full screen dirty update */
	clips.x1 = clips.y1 = 0;
	clips.x2 = fb->width;
	clips.y2 = fb->height;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  &clips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   &clips, NULL, NULL,
						   0, 0, 1, 1, &fence);


	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_sou_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	crtc->primary->fb = old_fb;
	return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.cursor_set2 = vmw_du_crtc_cursor_set2,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.set_config = vmw_sou_crtc_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};

/*
 * Screen Object Display Unit encoder functions
 */

static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */

static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
};

static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;

	sou->active_implicit = false;

	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;
	sou->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);
	drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	(void) drm_connector_register(connector);

	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev->mode_config.dirty_info_property,
				   1);

	return 0;
}

int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (dev_priv->sou_priv) {
		DRM_INFO("sou system already on\n");
		return -EINVAL;
	}

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
	if (unlikely(!dev_priv->sou_priv))
		goto err_no_mem;

	dev_priv->sou_priv->num_implicit = 0;
	dev_priv->sou_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (unlikely(ret != 0))
		goto err_vblank_cleanup;

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;

err_vblank_cleanup:
	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->sou_priv);
	dev_priv->sou_priv = NULL;
err_no_mem:
	return ret;
}

int vmw_kms_sou_close_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (!dev_priv->sou_priv)
		return -ENOSYS;

	drm_vblank_cleanup(dev);

	kfree(dev_priv->sou_priv);

	return 0;
}

static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support: contrary to svga_reg.h, this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the destination bounding box encoded.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
	int i;

	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
	cmd->header.size = sizeof(cmd->body) + region_size;

	/*
	 * Use the destination bounding box to specify the destination and
	 * source bounding regions.
	 */
	cmd->body.destRect.left = sdirty->left;
	cmd->body.destRect.right = sdirty->right;
	cmd->body.destRect.top = sdirty->top;
	cmd->body.destRect.bottom = sdirty->bottom;

	cmd->body.srcRect.left = sdirty->left + trans_x;
	cmd->body.srcRect.right = sdirty->right + trans_x;
	cmd->body.srcRect.top = sdirty->top + trans_y;
	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

	cmd->body.srcImage.sid = sdirty->sid;
	cmd->body.destScreenId = dirty->unit->unit;

	/* Blits are relative to the destination rect. Translate. */
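	/*
	 * Worked example (illustrative numbers only): if the destination
	 * bounding box computed above has left == 100 and top == 50, then a
	 * clip encoded at (120, 60)-(140, 80) is rewritten by the loop below
	 * to (20, 10)-(40, 30), i.e. made relative to destRect.
	 */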
	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
		blit->left -= sdirty->left;
		blit->right -= sdirty->left;
		blit->top -= sdirty->top;
		blit->bottom -= sdirty->top;
	}

	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

	sdirty->left = sdirty->top = S32_MAX;
	sdirty->right = sdirty->bottom = S32_MIN;
}

/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

	/* Destination rect. */
	blit += dirty->num_hits;
	blit->left = dirty->unit_x1;
	blit->top = dirty->unit_y1;
	blit->right = dirty->unit_x2;
	blit->bottom = dirty->unit_y2;

	/* Destination bounding box */
	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
				 struct vmw_framebuffer *framebuffer,
				 struct drm_clip_rect *clips,
				 struct drm_vmw_rect *vclips,
				 struct vmw_resource *srf,
				 s32 dest_x,
				 s32 dest_y,
				 unsigned num_clips, int inc,
				 struct vmw_fence_obj **out_fence)
{
	struct vmw_framebuffer_surface *vfbs =
		container_of(framebuffer, typeof(*vfbs), base);
	struct vmw_kms_sou_surface_dirty sdirty;
	int ret;

	if (!srf)
		srf = &vfbs->surface->res;

	ret = vmw_kms_helper_resource_prepare(srf, true);
	if (ret)
		return ret;

	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
	sdirty.base.clip = vmw_sou_surface_clip;
	sdirty.base.dev_priv = dev_priv;
	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
		sizeof(SVGASignedRect) * num_clips;

	sdirty.sid = srf->id;
	sdirty.left = sdirty.top = S32_MAX;
	sdirty.right = sdirty.bottom = S32_MIN;
	sdirty.dst_x = dest_x;
	sdirty.dst_y = dest_y;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   dest_x, dest_y, num_clips, inc,
				   &sdirty.base);
	vmw_kms_helper_resource_finish(srf, out_fence);

	return ret;
}

/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dma-buffer
 * blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blit clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a dma-buffer blit cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	blit->body.destScreenId = dirty->unit->unit;
	blit->body.srcOrigin.x = dirty->fb_x;
	blit->body.srcOrigin.y = dirty->fb_y;
	blit->body.destRect.left = dirty->unit_x1;
	blit->body.destRect.top = dirty->unit_y1;
	blit->body.destRect.right = dirty->unit_x2;
	blit->body.destRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips,
				unsigned num_clips, int increment,
				bool interruptible,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
					    false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
	dirty.clip = vmw_sou_dmabuf_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, NULL,
				   0, 0, num_clips, increment, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}


/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_readback_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	blit->body.srcScreenId = dirty->unit->unit;
	blit->body.destOrigin.x = dirty->fb_x;
	blit->body.destOrigin.y = dirty->fb_y;
	blit->body.srcRect.left = dirty->unit_x1;
	blit->body.srcRect.top = dirty->unit_y1;
	blit->body.srcRect.right = dirty->unit_x2;
	blit->body.srcRect.bottom = dirty->unit_y2;
	dirty->num_hits++;
}

/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
			 struct drm_file *file_priv,
			 struct vmw_framebuffer *vfb,
			 struct drm_vmw_fence_rep __user *user_fence_rep,
			 struct drm_vmw_rect *vclips,
			 uint32_t num_clips)
{
	struct vmw_dma_buffer *buf =
		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
	dirty.clip = vmw_sou_readback_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
				   0, 0, num_clips, 1, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}