/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)


void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	/* The ARGB image data immediately follows the command header. */
	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
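/*
 * Show/hide the hardware cursor and move its hot spot to (x, y).
 * Cursor state lives in dedicated FIFO registers rather than in the
 * command stream: the position registers are written first, then
 * SVGA_FIFO_CURSOR_COUNT is bumped as a sequence number so the host
 * notices that the other cursor registers have changed.
 */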
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				/* Drop the reference taken by the lookup. */
				vmw_surface_unreference(&surface);
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}
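/*
 * DRM cursor-move entry point.  DRM hands us coordinates relative to
 * the crtc, while the device cursor position is in virtual-screen
 * coordinates, so the crtc's offset is added before programming the
 * device.
 */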
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy (size != 64) */
		DRM_ERROR("Can't snoop cursor DMA; only a single page-aligned "
			  "full 64x64 upload is handled.\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* we can't call this function from here, since execbuf has
	 * reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
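/*
 * Called after command submission.  Any crtc whose snooped cursor
 * surface has aged (see vmw_kms_cursor_snoop() above) gets a fresh
 * cursor image pushed to the device here; this is the update that the
 * snooper itself could not issue while execbuf held reserved fifo
 * space.
 */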
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfbs->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/* Will not re-add if already pending. */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}
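/*
 * Dirty callback for surface-backed framebuffers.  When the device
 * lacks screen-object support (or no clip rects were passed) a
 * full-screen present is scheduled through the delayed work above,
 * throttled to VMWGFX_PRESENT_RATE; otherwise the clip rects are
 * translated directly into SVGA_3D_CMD_PRESENT copy rects.
 */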
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/* No work was pending, so force an immediate present. */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv,
			       sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) +
				       num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv,
			sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
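/*
 * Wrap a surface in a vmw_framebuffer.  The caller keeps its own
 * surface reference; a second one is taken here and dropped again in
 * vmw_framebuffer_surface_destroy().  No pin/unpin hooks are needed
 * since the backing surface is a host resource rather than a buffer in
 * guest VRAM.
 */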
int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
				    struct vmw_surface *surface,
				    struct vmw_framebuffer **out,
				    unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	int ret;

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = 32;
	vfbs->base.base.pitch = width * 32 / 8; /* bytes per scanline at 32 bpp */
	vfbs->base.base.depth = 24;
	vfbs->base.base.width = width;
	vfbs->base.base.height = height;
	vfbs->base.pin = NULL;
	vfbs->base.unpin = NULL;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}

int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	/* clips is advanced by increment each iteration, so dereference
	 * it directly instead of indexing it as well. */
	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
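/*
 * Pin callback for dmabuf framebuffers.  The legacy display unit scans
 * out directly from guest VRAM, so the buffer is moved to the start of
 * VRAM and the device is reprogrammed for a single screen of the
 * framebuffer's size; overlays are paused while this happens.
 */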
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
	} else
		WARN_ON(true);

	vmw_overlay_resume_all(dev_priv);

	return ret;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}

int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *dmabuf,
				   struct vmw_framebuffer **out,
				   unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	int ret;

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get the first 3 from the buffer info */
	vfbd->base.base.bits_per_pixel = 32;
	vfbd->base.base.pitch = width * 32 / 8; /* bytes per scanline at 32 bpp */
	vfbd->base.base.depth = 24;
	vfbd->base.base.width = width;
	vfbd->base.base.height = height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd->width, mode_cmd->height);

	/* vmw_user_surface_lookup takes one ref, so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return NULL;
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd->width, mode_cmd->height);

	/* vmw_user_dmabuf_lookup takes one ref, so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}

	return &vfb->base;
}

static int vmw_kms_fb_changed(struct drm_device *dev)
{
	return 0;
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.fb_changed = vmw_kms_fb_changed,
};
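/*
 * Set up the DRM mode configuration (mode size limits and the
 * framebuffer-creation hooks above) and bring up the legacy display
 * system.
 */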
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 640;
	dev->mode_config.min_height = 480;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	/*
	 * Set up a single multimon monitor with a size of 0x0; this
	 * stops the UI from resizing when we change the framebuffer
	 * size.
	 */
	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);

	return 0;
}
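/*
 * Undo vmw_kms_save_vga() by writing back the saved register state.
 * The display topology is currently reset unconditionally; as the TODO
 * below notes, this should presumably be gated on multimon support
 * like the setup in vmw_kms_save_vga().
 */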
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);

	/* TODO check for multimon */
	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);

	return 0;
}