/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"


/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)


struct vmw_clip_rect {
	int x1, x2, y1, y2;
};

/**
 * Clip @num_rects number of @rects against @clip storing the
 * results in @out_rects and the number of passed rects in @out_num.
 */
void vmw_clip_cliprects(struct drm_clip_rect *rects,
			int num_rects,
			struct vmw_clip_rect clip,
			SVGASignedRect *out_rects,
			int *out_num)
{
	int i, k;

	for (i = 0, k = 0; i < num_rects; i++) {
		int x1 = max_t(int, clip.x1, rects[i].x1);
		int y1 = max_t(int, clip.y1, rects[i].y1);
		int x2 = min_t(int, clip.x2, rects[i].x2);
		int y2 = min_t(int, clip.y2, rects[i].y2);

		if (x1 >= x2)
			continue;
		if (y1 >= y2)
			continue;

		out_rects[k].left = x1;
		out_rects[k].top = y1;
		out_rects[k].right = x2;
		out_rects[k].bottom = y2;
		k++;
	}

	*out_num = k;
}

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

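/**
 * vmw_cursor_update_image - Define the hardware alpha cursor by writing an
 * SVGA_CMD_DEFINE_ALPHA_CURSOR command, followed by the 32 bits-per-pixel
 * image data, to the device FIFO.
 */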
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}


void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64))
		return -EINVAL;

	if (handle) {
		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			return -EINVAL;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		return -EINVAL;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       du->hotspot_x, du->hotspot_y);
	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}

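/**
 * vmw_du_crtc_cursor_move - Store the new cursor position (given relative to
 * the crtc) on the display unit and push the hotspot-adjusted position to
 * the device. The cursor is only shown if a cursor surface or dmabuf is
 * currently set on the unit.
 */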
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	/* we can't call the cursor update function from here since execbuf
	 * has already reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *		srf->snooper.image, 64, 64,
	 *		du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct list_head head;
	struct drm_master *master;
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);

	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);

	drm_master_put(&vfbs->master);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}

static int do_surface_dirty_sou(struct vmw_private *dev_priv,
				struct drm_file *file_priv,
				struct vmw_framebuffer *framebuffer,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int inc,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
			    head) {
		if (crtc->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kzalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Temporary fifo memory alloc failed.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	/* setup blits pointer */
	blits = (SVGASignedRect *)&cmd[1];

	/* initial clip region */
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* skip the first clip rect */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	/* only need to do this once */
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	clips_ptr = clips;
	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
		tmp[i].x1 = clips_ptr->x1 - left;
		tmp[i].x2 = clips_ptr->x2 - left;
		tmp[i].y1 = clips_ptr->y1 - top;
		tmp[i].y2 = clips_ptr->y2 - top;
	}

	/* do per unit writing, reuse fifo for each */
	for (i = 0; i < num_units; i++) {
		struct vmw_display_unit *unit = units[i];
		struct vmw_clip_rect clip;
		int num;

		clip.x1 = left - unit->crtc.x;
		clip.y1 = top - unit->crtc.y;
		clip.x2 = right - unit->crtc.x;
		clip.y2 = bottom - unit->crtc.y;

		/* skip any crtcs that miss the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects need to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}

	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}

int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;

	/* Require ScreenObject support for 3D */
	if (!dev_priv->sou_priv)
		return -EINVAL;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
				   flags, color,
				   clips, num_clips, inc, NULL);

	ttm_read_unlock(&vmaster->lock);
	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct drm_file *file_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/* 3D is only supported on HWv8 hosts which support screen objects */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->scanout))
		return -EINVAL;

	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->sizes[0].width < mode_cmd->width ||
		     surface->sizes[0].height < mode_cmd->height ||
		     surface->sizes[0].depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	case 8:
		format = SVGA3D_LUMINANCE8;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	if (unlikely(format != surface->format)) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitches[0] = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->surface = surface;
	vfbs->base.user_handle = mode_cmd->handle;
	vfbs->master = drm_master_get(file_priv->master);

	mutex_lock(&vmaster->fb_surf_mutex);
	list_add_tail(&vfbs->head, &vmaster->fb_surf);
	mutex_unlock(&vmaster->fb_surf_mutex);

	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);
	ttm_base_object_unref(&vfbd->base.user_obj);

	kfree(vfbd);
}

static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment)
{
	size_t fifo_size;
	int i;

	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	fifo_size = sizeof(*cmd) * num_clips;
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, fifo_size);
	return 0;
}

static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
				  struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	int depth = framebuffer->base.depth;
	size_t fifo_size;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support, contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	fifo_size = sizeof(*cmd);
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	cmd->body.ptr.gmrId = framebuffer->user_handle;
	cmd->body.ptr.offset = 0;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
				  fifo_size, 0, NULL, NULL);

	kfree(cmd);

	return ret;
}

static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment,
			       struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	int i, k, num_units, ret;
	struct drm_crtc *crtc;
	size_t fifo_size;

	struct {
		uint32_t header;
		SVGAFifoCmdBlitGMRFBToScreen body;
	} *blits;

	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
	if (unlikely(ret != 0))
		return ret; /* define_gmrfb prints warnings */

	fifo_size = sizeof(*blits) * num_clips;
	blits = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(blits == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		int hit_num = 0;

		clips_ptr = clips;
		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
			int move_x, move_y;

			/* skip any clips that miss this crtc entirely */
			if (clip_x1 >= unit->crtc.mode.hdisplay ||
			    clip_y1 >= unit->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			/* clip size to crtc size */
			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);

			/* translate both src and dest to bring clip into screen */
			move_x = min_t(int, clip_x1, 0);
			move_y = min_t(int, clip_y1, 0);

			/* actual translate done here */
			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
			blits[hit_num].body.destScreenId = unit->unit;
			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
			blits[hit_num].body.destRect.left = clip_x1 - move_x;
			blits[hit_num].body.destRect.top = clip_y1 - move_y;
			blits[hit_num].body.destRect.right = clip_x2;
			blits[hit_num].body.destRect.bottom = clip_y2;
			hit_num++;
		}

		/* no clips hit the crtc */
		if (hit_num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		fifo_size = sizeof(*blits) * hit_num;
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}

	kfree(blits);

	return ret;
}

int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	if (dev_priv->ldu_priv) {
		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
					  flags, color,
					  clips, num_clips, increment);
	} else {
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
					  flags, color,
					  clips, num_clips, increment, NULL);
	}

	ttm_read_unlock(&vmaster->lock);
	return ret;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

/**
 * Pin the dma buffer to the start of vram.
 */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	/* This code should not be used with screen objects */
	BUG_ON(dev_priv->sou_priv);

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}

static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	/* Limited framebuffer color depth support for screen objects */
	if (dev_priv->sou_priv) {
		switch (mode_cmd->depth) {
		case 32:
		case 24:
			/* Only support 32 bpp for 32 and 24 depth fbs */
			if (mode_cmd->bpp == 32)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		case 16:
		case 15:
			/* Only support 16 bpp for 16 and 15 depth fbs */
			if (mode_cmd->bpp == 16)
				break;

			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
				  mode_cmd->depth, mode_cmd->bpp);
			return -EINVAL;
		default:
			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
			return -EINVAL;
		}
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		goto out_err3;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitches[0] = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	if (!dev_priv->sou_priv) {
		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	}
	vfbd->base.dmabuf = true;
	vfbd->buffer = dmabuf;
	vfbd->base.user_handle = mode_cmd->handle;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
			     &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */

	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */

	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	/* Create the new framebuffer depending on what we got back */
	if (bo)
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
						     &mode_cmd);
	else if (surface)
		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
						      surface, &vfb, &mode_cmd);
	else
		BUG();

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(surface == NULL);
	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	left = clips->x;
	right = clips->x + clips->w;
	top = clips->y;
	bottom = clips->y + clips->h;

	for (i = 1; i < num_clips; i++) {
		left = min_t(int, left, (int)clips[i].x);
		right = max_t(int, right, (int)clips[i].x + clips[i].w);
		top = min_t(int, top, (int)clips[i].y);
		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
	}

	/* only need to do this once */
	memset(cmd, 0, fifo_size);
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);

	blits = (SVGASignedRect *)&cmd[1];

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	for (i = 0; i < num_clips; i++) {
		tmp[i].x1 = clips[i].x - left;
		tmp[i].x2 = clips[i].x + clips[i].w - left;
		tmp[i].y1 = clips[i].y - top;
		tmp[i].y2 = clips[i].y + clips[i].h - top;
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		struct vmw_clip_rect clip;
		int num;

		clip.x1 = left + destX - unit->crtc.x;
		clip.y1 = top + destY - unit->crtc.y;
		clip.x2 = right + destX - unit->crtc.x;
		clip.y2 = bottom + destY - unit->crtc.y;

		/* skip any crtcs that miss the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects need to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = sid;
		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, NULL);

		if (unlikely(ret != 0))
			break;
	}

	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}

int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, ret, num_units, blits_pos;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;
	struct {
		uint32_t header;
		SVGAFifoCmdBlitScreenToGMRFB body;
	} *blits;

	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(dmabuf == NULL);
	BUG_ON(!clips || !num_clips);

	/* take a safe guess at fifo size */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
	cmd->body.format.colorDepth = vfb->base.depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = vfb->base.pitches[0];
	cmd->body.ptr.gmrId = vfb->user_handle;
	cmd->body.ptr.offset = 0;

	blits = (void *)&cmd[1];
	blits_pos = 0;
	for (i = 0; i < num_units; i++) {
		struct drm_vmw_rect *c = clips;
		for (k = 0; k < num_clips; k++, c++) {
			/* transform clip coords to crtc origin based coords */
			int clip_x1 = c->x - units[i]->crtc.x;
			int clip_x2 = c->x - units[i]->crtc.x + c->w;
			int clip_y1 = c->y - units[i]->crtc.y;
			int clip_y2 = c->y - units[i]->crtc.y + c->h;
			int dest_x = c->x;
			int dest_y = c->y;

			/* compensate for clipping, we negate
			 * a negative number and add that.
			 */
			if (clip_x1 < 0)
				dest_x += -clip_x1;
			if (clip_y1 < 0)
				dest_y += -clip_y1;

			/* clip */
			clip_x1 = max(clip_x1, 0);
			clip_y1 = max(clip_y1, 0);
			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);

			/* and cull any rects that miss the crtc */
			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
			blits[blits_pos].body.srcScreenId = units[i]->unit;
			blits[blits_pos].body.destOrigin.x = dest_x;
			blits[blits_pos].body.destOrigin.y = dest_y;

			blits[blits_pos].body.srcRect.left = clip_x1;
			blits[blits_pos].body.srcRect.top = clip_y1;
			blits[blits_pos].body.srcRect.right = clip_x2;
			blits[blits_pos].body.srcRect.bottom = clip_y2;
			blits_pos++;
		}
	}
	/* reset size here and use calculated exact size from loops */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
				  0, user_fence_rep, NULL);

	kfree(cmd);

	return ret;
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_screen_object_display(dev_priv);
	if (ret) /* Fallback */
		(void)vmw_kms_init_legacy_display_system(dev_priv);

	return 0;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	if (dev_priv->sou_priv)
		vmw_kms_close_screen_object_display(dev_priv);
	else
		vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}

int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}


/**
 * Function called by DRM code with vbl_lock held.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
int vmw_enable_vblank(struct drm_device *dev, int crtc)
{
	return -ENOSYS;
}

/**
 * Function called by DRM code with vbl_lock held.
 */
void vmw_disable_vblank(struct drm_device *dev, int crtc)
{
}


/*
 * Small shared kms functions.
 */

int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

#if 0
	{
		unsigned int i;

		DRM_INFO("%s: new layout ", __func__);
		for (i = 0; i < num; i++)
			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
				 rects[i].w, rects[i].h);
		DRM_INFO("\n");
	}
#endif

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
		} else {
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

int vmw_du_page_flip(struct drm_crtc *crtc,
		     struct drm_framebuffer *fb,
		     struct drm_pending_vblank_event *event)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct drm_file *file_priv;
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;

	if (event == NULL)
		return -EINVAL;

	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	file_priv = event->base.file_priv;
	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->fb = fb;

	/* do a full screen dirty update */
	clips.x1 = clips.y1 = 0;
	clips.x2 = fb->width;
	clips.y2 = fb->height;

	if (vfb->dmabuf)
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
					  0, 0, &clips, 1, 1, &fence);
	else
		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
					   0, 0, &clips, 1, 1, &fence);

	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	ret = vmw_event_fence_action_queue(file_priv, fence,
					   &event->base,
					   &event->event.tv_sec,
					   &event->event.tv_usec,
					   true);

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	crtc->fb = old_fb;
	return ret;
}


void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}

void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}
}

void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
}

void vmw_du_connector_save(struct drm_connector *connector)
{
}

void vmw_du_connector_restore(struct drm_connector *connector)
{
}

enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	mutex_lock(&dev_priv->hw_mutex);
	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
	mutex_unlock(&dev_priv->hw_mutex);

	return ((vmw_connector_to_du(connector)->unit < num_displays &&
		 du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;

	/* Add preferred mode */
	{
		mode = drm_mode_duplicate(dev, &prefmode);
		if (!mode)
			return 0;
		mode->hdisplay = du->pref_width;
		mode->vdisplay = du->pref_height;
		vmw_guess_mode_timing(mode);

		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
					       mode->vdisplay)) {
			drm_mode_probed_add(connector, mode);
		} else {
			drm_mode_destroy(dev, mode);
			mode = NULL;
		}

		if (du->pref_mode) {
			list_del_init(&du->pref_mode->head);
			drm_mode_destroy(dev, du->pref_mode);
		}

		/* mode might be null here, this is intended */
		du->pref_mode = mode;
	}

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	/* Move the preferred mode first, to help apps pick the right mode. */
	if (du->pref_mode)
		list_move(&du->pref_mode->head, &connector->probed_modes);

	drm_mode_connector_list_update(connector);

	return 1;
}

int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}


int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	struct drm_mode_config *mode_config = &dev->mode_config;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}