1 /************************************************************************** 2 * 3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 #include "vmwgfx_kms.h" 29 30 31 /* Might need a hrtimer here? */ 32 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 33 34 35 struct vmw_clip_rect { 36 int x1, x2, y1, y2; 37 }; 38 39 /** 40 * Clip @num_rects number of @rects against @clip storing the 41 * results in @out_rects and the number of passed rects in @out_num. 42 */ 43 void vmw_clip_cliprects(struct drm_clip_rect *rects, 44 int num_rects, 45 struct vmw_clip_rect clip, 46 SVGASignedRect *out_rects, 47 int *out_num) 48 { 49 int i, k; 50 51 for (i = 0, k = 0; i < num_rects; i++) { 52 int x1 = max_t(int, clip.x1, rects[i].x1); 53 int y1 = max_t(int, clip.y1, rects[i].y1); 54 int x2 = min_t(int, clip.x2, rects[i].x2); 55 int y2 = min_t(int, clip.y2, rects[i].y2); 56 57 if (x1 >= x2) 58 continue; 59 if (y1 >= y2) 60 continue; 61 62 out_rects[k].left = x1; 63 out_rects[k].top = y1; 64 out_rects[k].right = x2; 65 out_rects[k].bottom = y2; 66 k++; 67 } 68 69 *out_num = k; 70 } 71 72 void vmw_display_unit_cleanup(struct vmw_display_unit *du) 73 { 74 if (du->cursor_surface) 75 vmw_surface_unreference(&du->cursor_surface); 76 if (du->cursor_dmabuf) 77 vmw_dmabuf_unreference(&du->cursor_dmabuf); 78 drm_crtc_cleanup(&du->crtc); 79 drm_encoder_cleanup(&du->encoder); 80 drm_connector_cleanup(&du->connector); 81 } 82 83 /* 84 * Display Unit Cursor functions 85 */ 86 87 int vmw_cursor_update_image(struct vmw_private *dev_priv, 88 u32 *image, u32 width, u32 height, 89 u32 hotspotX, u32 hotspotY) 90 { 91 struct { 92 u32 cmd; 93 SVGAFifoCmdDefineAlphaCursor cursor; 94 } *cmd; 95 u32 image_size = width * height * 4; 96 u32 cmd_size = sizeof(*cmd) + image_size; 97 98 if (!image) 99 return -EINVAL; 100 101 cmd = vmw_fifo_reserve(dev_priv, cmd_size); 102 if (unlikely(cmd == NULL)) { 103 DRM_ERROR("Fifo reserve failed.\n"); 104 return -ENOMEM; 105 } 106 107 memset(cmd, 0, sizeof(*cmd)); 108 109 memcpy(&cmd[1], image, image_size); 110 111 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); 112 cmd->cursor.id = cpu_to_le32(0); 113 cmd->cursor.width = cpu_to_le32(width); 114 cmd->cursor.height = cpu_to_le32(height); 115 cmd->cursor.hotspotX = 
cpu_to_le32(hotspotX); 116 cmd->cursor.hotspotY = cpu_to_le32(hotspotY); 117 118 vmw_fifo_commit(dev_priv, cmd_size); 119 120 return 0; 121 } 122 123 int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv, 124 struct vmw_dma_buffer *dmabuf, 125 u32 width, u32 height, 126 u32 hotspotX, u32 hotspotY) 127 { 128 struct ttm_bo_kmap_obj map; 129 unsigned long kmap_offset; 130 unsigned long kmap_num; 131 void *virtual; 132 bool dummy; 133 int ret; 134 135 kmap_offset = 0; 136 kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT; 137 138 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); 139 if (unlikely(ret != 0)) { 140 DRM_ERROR("reserve failed\n"); 141 return -EINVAL; 142 } 143 144 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); 145 if (unlikely(ret != 0)) 146 goto err_unreserve; 147 148 virtual = ttm_kmap_obj_virtual(&map, &dummy); 149 ret = vmw_cursor_update_image(dev_priv, virtual, width, height, 150 hotspotX, hotspotY); 151 152 ttm_bo_kunmap(&map); 153 err_unreserve: 154 ttm_bo_unreserve(&dmabuf->base); 155 156 return ret; 157 } 158 159 160 void vmw_cursor_update_position(struct vmw_private *dev_priv, 161 bool show, int x, int y) 162 { 163 __le32 __iomem *fifo_mem = dev_priv->mmio_virt; 164 uint32_t count; 165 166 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); 167 iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X); 168 iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y); 169 count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT); 170 iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); 171 } 172 173 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, 174 uint32_t handle, uint32_t width, uint32_t height) 175 { 176 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 177 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 178 struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 179 struct vmw_surface *surface = NULL; 180 struct vmw_dma_buffer *dmabuf = NULL; 181 int ret; 182 183 /* A lot of the code assumes this */ 184 if (handle && (width != 64 || height != 64)) 185 return -EINVAL; 186 187 if (handle) { 188 ret = vmw_user_lookup_handle(dev_priv, tfile, 189 handle, &surface, &dmabuf); 190 if (ret) { 191 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret); 192 return -EINVAL; 193 } 194 } 195 196 /* need to do this before taking down old image */ 197 if (surface && !surface->snooper.image) { 198 DRM_ERROR("surface not suitable for cursor\n"); 199 vmw_surface_unreference(&surface); 200 return -EINVAL; 201 } 202 203 /* takedown old cursor */ 204 if (du->cursor_surface) { 205 du->cursor_surface->snooper.crtc = NULL; 206 vmw_surface_unreference(&du->cursor_surface); 207 } 208 if (du->cursor_dmabuf) 209 vmw_dmabuf_unreference(&du->cursor_dmabuf); 210 211 /* setup new image */ 212 if (surface) { 213 /* vmw_user_surface_lookup takes one reference */ 214 du->cursor_surface = surface; 215 216 du->cursor_surface->snooper.crtc = crtc; 217 du->cursor_age = du->cursor_surface->snooper.age; 218 vmw_cursor_update_image(dev_priv, surface->snooper.image, 219 64, 64, du->hotspot_x, du->hotspot_y); 220 } else if (dmabuf) { 221 /* vmw_user_surface_lookup takes one reference */ 222 du->cursor_dmabuf = dmabuf; 223 224 ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, 225 du->hotspot_x, du->hotspot_y); 226 } else { 227 vmw_cursor_update_position(dev_priv, false, 0, 0); 228 return 0; 229 } 230 231 vmw_cursor_update_position(dev_priv, true, 232 du->cursor_x + du->hotspot_x, 233 du->cursor_y + du->hotspot_y); 234 235 return 0; 236 } 237 238 
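/**
 * vmw_du_crtc_cursor_move - Update the device cursor position.
 *
 * @crtc: The crtc the cursor is moved on.
 * @x: New cursor x coordinate, relative to the crtc.
 * @y: New cursor y coordinate, relative to the crtc.
 *
 * Translates the crtc-relative position into display coordinates and
 * writes it, offset by the display unit's hotspot, to the FIFO cursor
 * registers. The cursor is only shown if a cursor surface or dmabuf is
 * currently set on the display unit.
 */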
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	/* we can't update the cursor image from here, since execbuf has
	 * reserved fifo space.
337 * 338 * if (srf->snooper.crtc) 339 * vmw_ldu_crtc_cursor_update_image(dev_priv, 340 * srf->snooper.image, 64, 64, 341 * du->hotspot_x, du->hotspot_y); 342 */ 343 344 ttm_bo_kunmap(&map); 345 err_unreserve: 346 ttm_bo_unreserve(bo); 347 } 348 349 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) 350 { 351 struct drm_device *dev = dev_priv->dev; 352 struct vmw_display_unit *du; 353 struct drm_crtc *crtc; 354 355 mutex_lock(&dev->mode_config.mutex); 356 357 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 358 du = vmw_crtc_to_du(crtc); 359 if (!du->cursor_surface || 360 du->cursor_age == du->cursor_surface->snooper.age) 361 continue; 362 363 du->cursor_age = du->cursor_surface->snooper.age; 364 vmw_cursor_update_image(dev_priv, 365 du->cursor_surface->snooper.image, 366 64, 64, du->hotspot_x, du->hotspot_y); 367 } 368 369 mutex_unlock(&dev->mode_config.mutex); 370 } 371 372 /* 373 * Generic framebuffer code 374 */ 375 376 int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, 377 struct drm_file *file_priv, 378 unsigned int *handle) 379 { 380 if (handle) 381 *handle = 0; 382 383 return 0; 384 } 385 386 /* 387 * Surface framebuffer code 388 */ 389 390 #define vmw_framebuffer_to_vfbs(x) \ 391 container_of(x, struct vmw_framebuffer_surface, base.base) 392 393 struct vmw_framebuffer_surface { 394 struct vmw_framebuffer base; 395 struct vmw_surface *surface; 396 struct vmw_dma_buffer *buffer; 397 struct list_head head; 398 struct drm_master *master; 399 }; 400 401 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) 402 { 403 struct vmw_framebuffer_surface *vfbs = 404 vmw_framebuffer_to_vfbs(framebuffer); 405 struct vmw_master *vmaster = vmw_master(vfbs->master); 406 407 408 mutex_lock(&vmaster->fb_surf_mutex); 409 list_del(&vfbs->head); 410 mutex_unlock(&vmaster->fb_surf_mutex); 411 412 drm_master_put(&vfbs->master); 413 drm_framebuffer_cleanup(framebuffer); 414 vmw_surface_unreference(&vfbs->surface); 415 ttm_base_object_unref(&vfbs->base.user_obj); 416 417 kfree(vfbs); 418 } 419 420 static int do_surface_dirty_sou(struct vmw_private *dev_priv, 421 struct drm_file *file_priv, 422 struct vmw_framebuffer *framebuffer, 423 unsigned flags, unsigned color, 424 struct drm_clip_rect *clips, 425 unsigned num_clips, int inc, 426 struct vmw_fence_obj **out_fence) 427 { 428 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 429 struct drm_clip_rect *clips_ptr; 430 struct drm_clip_rect *tmp; 431 struct drm_crtc *crtc; 432 size_t fifo_size; 433 int i, num_units; 434 int ret = 0; /* silence warning */ 435 int left, right, top, bottom; 436 437 struct { 438 SVGA3dCmdHeader header; 439 SVGA3dCmdBlitSurfaceToScreen body; 440 } *cmd; 441 SVGASignedRect *blits; 442 443 num_units = 0; 444 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, 445 head) { 446 if (crtc->fb != &framebuffer->base) 447 continue; 448 units[num_units++] = vmw_crtc_to_du(crtc); 449 } 450 451 BUG_ON(!clips || !num_clips); 452 453 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL); 454 if (unlikely(tmp == NULL)) { 455 DRM_ERROR("Temporary cliprect memory alloc failed.\n"); 456 return -ENOMEM; 457 } 458 459 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; 460 cmd = kzalloc(fifo_size, GFP_KERNEL); 461 if (unlikely(cmd == NULL)) { 462 DRM_ERROR("Temporary fifo memory alloc failed.\n"); 463 ret = -ENOMEM; 464 goto out_free_tmp; 465 } 466 467 /* setup blits pointer */ 468 blits = (SVGASignedRect *)&cmd[1]; 469 470 /* initial clip region */ 471 left = 
clips->x1; 472 right = clips->x2; 473 top = clips->y1; 474 bottom = clips->y2; 475 476 /* skip the first clip rect */ 477 for (i = 1, clips_ptr = clips + inc; 478 i < num_clips; i++, clips_ptr += inc) { 479 left = min_t(int, left, (int)clips_ptr->x1); 480 right = max_t(int, right, (int)clips_ptr->x2); 481 top = min_t(int, top, (int)clips_ptr->y1); 482 bottom = max_t(int, bottom, (int)clips_ptr->y2); 483 } 484 485 /* only need to do this once */ 486 memset(cmd, 0, fifo_size); 487 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); 488 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); 489 490 cmd->body.srcRect.left = left; 491 cmd->body.srcRect.right = right; 492 cmd->body.srcRect.top = top; 493 cmd->body.srcRect.bottom = bottom; 494 495 clips_ptr = clips; 496 for (i = 0; i < num_clips; i++, clips_ptr += inc) { 497 tmp[i].x1 = clips_ptr->x1 - left; 498 tmp[i].x2 = clips_ptr->x2 - left; 499 tmp[i].y1 = clips_ptr->y1 - top; 500 tmp[i].y2 = clips_ptr->y2 - top; 501 } 502 503 /* do per unit writing, reuse fifo for each */ 504 for (i = 0; i < num_units; i++) { 505 struct vmw_display_unit *unit = units[i]; 506 struct vmw_clip_rect clip; 507 int num; 508 509 clip.x1 = left - unit->crtc.x; 510 clip.y1 = top - unit->crtc.y; 511 clip.x2 = right - unit->crtc.x; 512 clip.y2 = bottom - unit->crtc.y; 513 514 /* skip any crtcs that misses the clip region */ 515 if (clip.x1 >= unit->crtc.mode.hdisplay || 516 clip.y1 >= unit->crtc.mode.vdisplay || 517 clip.x2 <= 0 || clip.y2 <= 0) 518 continue; 519 520 /* 521 * In order for the clip rects to be correctly scaled 522 * the src and dest rects needs to be the same size. 523 */ 524 cmd->body.destRect.left = clip.x1; 525 cmd->body.destRect.right = clip.x2; 526 cmd->body.destRect.top = clip.y1; 527 cmd->body.destRect.bottom = clip.y2; 528 529 /* create a clip rect of the crtc in dest coords */ 530 clip.x2 = unit->crtc.mode.hdisplay - clip.x1; 531 clip.y2 = unit->crtc.mode.vdisplay - clip.y1; 532 clip.x1 = 0 - clip.x1; 533 clip.y1 = 0 - clip.y1; 534 535 /* need to reset sid as it is changed by execbuf */ 536 cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle); 537 cmd->body.destScreenId = unit->unit; 538 539 /* clip and write blits to cmd stream */ 540 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num); 541 542 /* if no cliprects hit skip this */ 543 if (num == 0) 544 continue; 545 546 /* only return the last fence */ 547 if (out_fence && *out_fence) 548 vmw_fence_obj_unreference(out_fence); 549 550 /* recalculate package length */ 551 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; 552 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); 553 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, 554 fifo_size, 0, NULL, out_fence); 555 556 if (unlikely(ret != 0)) 557 break; 558 } 559 560 561 kfree(cmd); 562 out_free_tmp: 563 kfree(tmp); 564 565 return ret; 566 } 567 568 int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, 569 struct drm_file *file_priv, 570 unsigned flags, unsigned color, 571 struct drm_clip_rect *clips, 572 unsigned num_clips) 573 { 574 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 575 struct vmw_master *vmaster = vmw_master(file_priv->master); 576 struct vmw_framebuffer_surface *vfbs = 577 vmw_framebuffer_to_vfbs(framebuffer); 578 struct drm_clip_rect norect; 579 int ret, inc = 1; 580 581 if (unlikely(vfbs->master != file_priv->master)) 582 return -EINVAL; 583 584 /* Require ScreenObject support for 3D */ 585 if (!dev_priv->sou_priv) 586 return 
-EINVAL; 587 588 ret = ttm_read_lock(&vmaster->lock, true); 589 if (unlikely(ret != 0)) 590 return ret; 591 592 if (!num_clips) { 593 num_clips = 1; 594 clips = &norect; 595 norect.x1 = norect.y1 = 0; 596 norect.x2 = framebuffer->width; 597 norect.y2 = framebuffer->height; 598 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { 599 num_clips /= 2; 600 inc = 2; /* skip source rects */ 601 } 602 603 ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, 604 flags, color, 605 clips, num_clips, inc, NULL); 606 607 ttm_read_unlock(&vmaster->lock); 608 return 0; 609 } 610 611 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 612 .destroy = vmw_framebuffer_surface_destroy, 613 .dirty = vmw_framebuffer_surface_dirty, 614 .create_handle = vmw_framebuffer_create_handle, 615 }; 616 617 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 618 struct drm_file *file_priv, 619 struct vmw_surface *surface, 620 struct vmw_framebuffer **out, 621 const struct drm_mode_fb_cmd 622 *mode_cmd) 623 624 { 625 struct drm_device *dev = dev_priv->dev; 626 struct vmw_framebuffer_surface *vfbs; 627 enum SVGA3dSurfaceFormat format; 628 struct vmw_master *vmaster = vmw_master(file_priv->master); 629 int ret; 630 631 /* 3D is only supported on HWv8 hosts which supports screen objects */ 632 if (!dev_priv->sou_priv) 633 return -ENOSYS; 634 635 /* 636 * Sanity checks. 637 */ 638 639 /* Surface must be marked as a scanout. */ 640 if (unlikely(!surface->scanout)) 641 return -EINVAL; 642 643 if (unlikely(surface->mip_levels[0] != 1 || 644 surface->num_sizes != 1 || 645 surface->sizes[0].width < mode_cmd->width || 646 surface->sizes[0].height < mode_cmd->height || 647 surface->sizes[0].depth != 1)) { 648 DRM_ERROR("Incompatible surface dimensions " 649 "for requested mode.\n"); 650 return -EINVAL; 651 } 652 653 switch (mode_cmd->depth) { 654 case 32: 655 format = SVGA3D_A8R8G8B8; 656 break; 657 case 24: 658 format = SVGA3D_X8R8G8B8; 659 break; 660 case 16: 661 format = SVGA3D_R5G6B5; 662 break; 663 case 15: 664 format = SVGA3D_A1R5G5B5; 665 break; 666 case 8: 667 format = SVGA3D_LUMINANCE8; 668 break; 669 default: 670 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); 671 return -EINVAL; 672 } 673 674 if (unlikely(format != surface->format)) { 675 DRM_ERROR("Invalid surface format for requested mode.\n"); 676 return -EINVAL; 677 } 678 679 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); 680 if (!vfbs) { 681 ret = -ENOMEM; 682 goto out_err1; 683 } 684 685 ret = drm_framebuffer_init(dev, &vfbs->base.base, 686 &vmw_framebuffer_surface_funcs); 687 if (ret) 688 goto out_err2; 689 690 if (!vmw_surface_reference(surface)) { 691 DRM_ERROR("failed to reference surface %p\n", surface); 692 goto out_err3; 693 } 694 695 /* XXX get the first 3 from the surface info */ 696 vfbs->base.base.bits_per_pixel = mode_cmd->bpp; 697 vfbs->base.base.pitches[0] = mode_cmd->pitch; 698 vfbs->base.base.depth = mode_cmd->depth; 699 vfbs->base.base.width = mode_cmd->width; 700 vfbs->base.base.height = mode_cmd->height; 701 vfbs->surface = surface; 702 vfbs->base.user_handle = mode_cmd->handle; 703 vfbs->master = drm_master_get(file_priv->master); 704 705 mutex_lock(&vmaster->fb_surf_mutex); 706 list_add_tail(&vfbs->head, &vmaster->fb_surf); 707 mutex_unlock(&vmaster->fb_surf_mutex); 708 709 *out = &vfbs->base; 710 711 return 0; 712 713 out_err3: 714 drm_framebuffer_cleanup(&vfbs->base.base); 715 out_err2: 716 kfree(vfbs); 717 out_err1: 718 return ret; 719 } 720 721 /* 722 * Dmabuf framebuffer code 723 */ 724 
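/*
 * struct vmw_framebuffer_dmabuf - vmw_framebuffer backed by a dma buffer.
 * The vmw_framebuffer_to_vfbd() macro below recovers the containing
 * struct vmw_framebuffer_dmabuf from its embedded struct drm_framebuffer.
 */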
725 #define vmw_framebuffer_to_vfbd(x) \ 726 container_of(x, struct vmw_framebuffer_dmabuf, base.base) 727 728 struct vmw_framebuffer_dmabuf { 729 struct vmw_framebuffer base; 730 struct vmw_dma_buffer *buffer; 731 }; 732 733 void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) 734 { 735 struct vmw_framebuffer_dmabuf *vfbd = 736 vmw_framebuffer_to_vfbd(framebuffer); 737 738 drm_framebuffer_cleanup(framebuffer); 739 vmw_dmabuf_unreference(&vfbd->buffer); 740 ttm_base_object_unref(&vfbd->base.user_obj); 741 742 kfree(vfbd); 743 } 744 745 static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv, 746 struct vmw_framebuffer *framebuffer, 747 unsigned flags, unsigned color, 748 struct drm_clip_rect *clips, 749 unsigned num_clips, int increment) 750 { 751 size_t fifo_size; 752 int i; 753 754 struct { 755 uint32_t header; 756 SVGAFifoCmdUpdate body; 757 } *cmd; 758 759 fifo_size = sizeof(*cmd) * num_clips; 760 cmd = vmw_fifo_reserve(dev_priv, fifo_size); 761 if (unlikely(cmd == NULL)) { 762 DRM_ERROR("Fifo reserve failed.\n"); 763 return -ENOMEM; 764 } 765 766 memset(cmd, 0, fifo_size); 767 for (i = 0; i < num_clips; i++, clips += increment) { 768 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); 769 cmd[i].body.x = cpu_to_le32(clips->x1); 770 cmd[i].body.y = cpu_to_le32(clips->y1); 771 cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); 772 cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); 773 } 774 775 vmw_fifo_commit(dev_priv, fifo_size); 776 return 0; 777 } 778 779 static int do_dmabuf_define_gmrfb(struct drm_file *file_priv, 780 struct vmw_private *dev_priv, 781 struct vmw_framebuffer *framebuffer) 782 { 783 int depth = framebuffer->base.depth; 784 size_t fifo_size; 785 int ret; 786 787 struct { 788 uint32_t header; 789 SVGAFifoCmdDefineGMRFB body; 790 } *cmd; 791 792 /* Emulate RGBA support, contrary to svga_reg.h this is not 793 * supported by hosts. This is only a problem if we are reading 794 * this value later and expecting what we uploaded back. 
795 */ 796 if (depth == 32) 797 depth = 24; 798 799 fifo_size = sizeof(*cmd); 800 cmd = kmalloc(fifo_size, GFP_KERNEL); 801 if (unlikely(cmd == NULL)) { 802 DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); 803 return -ENOMEM; 804 } 805 806 memset(cmd, 0, fifo_size); 807 cmd->header = SVGA_CMD_DEFINE_GMRFB; 808 cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; 809 cmd->body.format.colorDepth = depth; 810 cmd->body.format.reserved = 0; 811 cmd->body.bytesPerLine = framebuffer->base.pitches[0]; 812 cmd->body.ptr.gmrId = framebuffer->user_handle; 813 cmd->body.ptr.offset = 0; 814 815 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, 816 fifo_size, 0, NULL, NULL); 817 818 kfree(cmd); 819 820 return ret; 821 } 822 823 static int do_dmabuf_dirty_sou(struct drm_file *file_priv, 824 struct vmw_private *dev_priv, 825 struct vmw_framebuffer *framebuffer, 826 unsigned flags, unsigned color, 827 struct drm_clip_rect *clips, 828 unsigned num_clips, int increment, 829 struct vmw_fence_obj **out_fence) 830 { 831 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 832 struct drm_clip_rect *clips_ptr; 833 int i, k, num_units, ret; 834 struct drm_crtc *crtc; 835 size_t fifo_size; 836 837 struct { 838 uint32_t header; 839 SVGAFifoCmdBlitGMRFBToScreen body; 840 } *blits; 841 842 ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer); 843 if (unlikely(ret != 0)) 844 return ret; /* define_gmrfb prints warnings */ 845 846 fifo_size = sizeof(*blits) * num_clips; 847 blits = kmalloc(fifo_size, GFP_KERNEL); 848 if (unlikely(blits == NULL)) { 849 DRM_ERROR("Failed to allocate temporary cmd buffer.\n"); 850 return -ENOMEM; 851 } 852 853 num_units = 0; 854 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { 855 if (crtc->fb != &framebuffer->base) 856 continue; 857 units[num_units++] = vmw_crtc_to_du(crtc); 858 } 859 860 for (k = 0; k < num_units; k++) { 861 struct vmw_display_unit *unit = units[k]; 862 int hit_num = 0; 863 864 clips_ptr = clips; 865 for (i = 0; i < num_clips; i++, clips_ptr += increment) { 866 int clip_x1 = clips_ptr->x1 - unit->crtc.x; 867 int clip_y1 = clips_ptr->y1 - unit->crtc.y; 868 int clip_x2 = clips_ptr->x2 - unit->crtc.x; 869 int clip_y2 = clips_ptr->y2 - unit->crtc.y; 870 int move_x, move_y; 871 872 /* skip any crtcs that misses the clip region */ 873 if (clip_x1 >= unit->crtc.mode.hdisplay || 874 clip_y1 >= unit->crtc.mode.vdisplay || 875 clip_x2 <= 0 || clip_y2 <= 0) 876 continue; 877 878 /* clip size to crtc size */ 879 clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay); 880 clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay); 881 882 /* translate both src and dest to bring clip into screen */ 883 move_x = min_t(int, clip_x1, 0); 884 move_y = min_t(int, clip_y1, 0); 885 886 /* actual translate done here */ 887 blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN; 888 blits[hit_num].body.destScreenId = unit->unit; 889 blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x; 890 blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y; 891 blits[hit_num].body.destRect.left = clip_x1 - move_x; 892 blits[hit_num].body.destRect.top = clip_y1 - move_y; 893 blits[hit_num].body.destRect.right = clip_x2; 894 blits[hit_num].body.destRect.bottom = clip_y2; 895 hit_num++; 896 } 897 898 /* no clips hit the crtc */ 899 if (hit_num == 0) 900 continue; 901 902 /* only return the last fence */ 903 if (out_fence && *out_fence) 904 vmw_fence_obj_unreference(out_fence); 905 906 fifo_size = sizeof(*blits) * hit_num; 907 ret = 
vmw_execbuf_process(file_priv, dev_priv, NULL, blits, 908 fifo_size, 0, NULL, out_fence); 909 910 if (unlikely(ret != 0)) 911 break; 912 } 913 914 kfree(blits); 915 916 return ret; 917 } 918 919 int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, 920 struct drm_file *file_priv, 921 unsigned flags, unsigned color, 922 struct drm_clip_rect *clips, 923 unsigned num_clips) 924 { 925 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 926 struct vmw_master *vmaster = vmw_master(file_priv->master); 927 struct vmw_framebuffer_dmabuf *vfbd = 928 vmw_framebuffer_to_vfbd(framebuffer); 929 struct drm_clip_rect norect; 930 int ret, increment = 1; 931 932 ret = ttm_read_lock(&vmaster->lock, true); 933 if (unlikely(ret != 0)) 934 return ret; 935 936 if (!num_clips) { 937 num_clips = 1; 938 clips = &norect; 939 norect.x1 = norect.y1 = 0; 940 norect.x2 = framebuffer->width; 941 norect.y2 = framebuffer->height; 942 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { 943 num_clips /= 2; 944 increment = 2; 945 } 946 947 if (dev_priv->ldu_priv) { 948 ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, 949 flags, color, 950 clips, num_clips, increment); 951 } else { 952 ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base, 953 flags, color, 954 clips, num_clips, increment, NULL); 955 } 956 957 ttm_read_unlock(&vmaster->lock); 958 return ret; 959 } 960 961 static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { 962 .destroy = vmw_framebuffer_dmabuf_destroy, 963 .dirty = vmw_framebuffer_dmabuf_dirty, 964 .create_handle = vmw_framebuffer_create_handle, 965 }; 966 967 /** 968 * Pin the dmabuffer to the start of vram. 969 */ 970 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) 971 { 972 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 973 struct vmw_framebuffer_dmabuf *vfbd = 974 vmw_framebuffer_to_vfbd(&vfb->base); 975 int ret; 976 977 /* This code should not be used with screen objects */ 978 BUG_ON(dev_priv->sou_priv); 979 980 vmw_overlay_pause_all(dev_priv); 981 982 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false); 983 984 vmw_overlay_resume_all(dev_priv); 985 986 WARN_ON(ret != 0); 987 988 return 0; 989 } 990 991 static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) 992 { 993 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); 994 struct vmw_framebuffer_dmabuf *vfbd = 995 vmw_framebuffer_to_vfbd(&vfb->base); 996 997 if (!vfbd->buffer) { 998 WARN_ON(!vfbd->buffer); 999 return 0; 1000 } 1001 1002 return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false); 1003 } 1004 1005 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, 1006 struct vmw_dma_buffer *dmabuf, 1007 struct vmw_framebuffer **out, 1008 const struct drm_mode_fb_cmd 1009 *mode_cmd) 1010 1011 { 1012 struct drm_device *dev = dev_priv->dev; 1013 struct vmw_framebuffer_dmabuf *vfbd; 1014 unsigned int requested_size; 1015 int ret; 1016 1017 requested_size = mode_cmd->height * mode_cmd->pitch; 1018 if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) { 1019 DRM_ERROR("Screen buffer object size is too small " 1020 "for requested mode.\n"); 1021 return -EINVAL; 1022 } 1023 1024 /* Limited framebuffer color depth support for screen objects */ 1025 if (dev_priv->sou_priv) { 1026 switch (mode_cmd->depth) { 1027 case 32: 1028 case 24: 1029 /* Only support 32 bpp for 32 and 24 depth fbs */ 1030 if (mode_cmd->bpp == 32) 1031 break; 1032 1033 DRM_ERROR("Invalid color depth/bbp: %d %d\n", 1034 mode_cmd->depth, mode_cmd->bpp); 1035 
return -EINVAL; 1036 case 16: 1037 case 15: 1038 /* Only support 16 bpp for 16 and 15 depth fbs */ 1039 if (mode_cmd->bpp == 16) 1040 break; 1041 1042 DRM_ERROR("Invalid color depth/bbp: %d %d\n", 1043 mode_cmd->depth, mode_cmd->bpp); 1044 return -EINVAL; 1045 default: 1046 DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth); 1047 return -EINVAL; 1048 } 1049 } 1050 1051 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); 1052 if (!vfbd) { 1053 ret = -ENOMEM; 1054 goto out_err1; 1055 } 1056 1057 ret = drm_framebuffer_init(dev, &vfbd->base.base, 1058 &vmw_framebuffer_dmabuf_funcs); 1059 if (ret) 1060 goto out_err2; 1061 1062 if (!vmw_dmabuf_reference(dmabuf)) { 1063 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf); 1064 goto out_err3; 1065 } 1066 1067 vfbd->base.base.bits_per_pixel = mode_cmd->bpp; 1068 vfbd->base.base.pitches[0] = mode_cmd->pitch; 1069 vfbd->base.base.depth = mode_cmd->depth; 1070 vfbd->base.base.width = mode_cmd->width; 1071 vfbd->base.base.height = mode_cmd->height; 1072 if (!dev_priv->sou_priv) { 1073 vfbd->base.pin = vmw_framebuffer_dmabuf_pin; 1074 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; 1075 } 1076 vfbd->base.dmabuf = true; 1077 vfbd->buffer = dmabuf; 1078 vfbd->base.user_handle = mode_cmd->handle; 1079 *out = &vfbd->base; 1080 1081 return 0; 1082 1083 out_err3: 1084 drm_framebuffer_cleanup(&vfbd->base.base); 1085 out_err2: 1086 kfree(vfbd); 1087 out_err1: 1088 return ret; 1089 } 1090 1091 /* 1092 * Generic Kernel modesetting functions 1093 */ 1094 1095 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, 1096 struct drm_file *file_priv, 1097 struct drm_mode_fb_cmd2 *mode_cmd2) 1098 { 1099 struct vmw_private *dev_priv = vmw_priv(dev); 1100 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1101 struct vmw_framebuffer *vfb = NULL; 1102 struct vmw_surface *surface = NULL; 1103 struct vmw_dma_buffer *bo = NULL; 1104 struct ttm_base_object *user_obj; 1105 struct drm_mode_fb_cmd mode_cmd; 1106 int ret; 1107 1108 mode_cmd.width = mode_cmd2->width; 1109 mode_cmd.height = mode_cmd2->height; 1110 mode_cmd.pitch = mode_cmd2->pitches[0]; 1111 mode_cmd.handle = mode_cmd2->handles[0]; 1112 drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth, 1113 &mode_cmd.bpp); 1114 1115 /** 1116 * This code should be conditioned on Screen Objects not being used. 1117 * If screen objects are used, we can allocate a GMR to hold the 1118 * requested framebuffer. 1119 */ 1120 1121 if (!vmw_kms_validate_mode_vram(dev_priv, 1122 mode_cmd.pitch, 1123 mode_cmd.height)) { 1124 DRM_ERROR("VRAM size is too small for requested mode.\n"); 1125 return ERR_PTR(-ENOMEM); 1126 } 1127 1128 /* 1129 * Take a reference on the user object of the resource 1130 * backing the kms fb. This ensures that user-space handle 1131 * lookups on that resource will always work as long as 1132 * it's registered with a kms framebuffer. This is important, 1133 * since vmw_execbuf_process identifies resources in the 1134 * command stream using user-space handles. 1135 */ 1136 1137 user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle); 1138 if (unlikely(user_obj == NULL)) { 1139 DRM_ERROR("Could not locate requested kms frame buffer.\n"); 1140 return ERR_PTR(-ENOENT); 1141 } 1142 1143 /** 1144 * End conditioned code. 
1145 */ 1146 1147 /* returns either a dmabuf or surface */ 1148 ret = vmw_user_lookup_handle(dev_priv, tfile, 1149 mode_cmd.handle, 1150 &surface, &bo); 1151 if (ret) 1152 goto err_out; 1153 1154 /* Create the new framebuffer depending one what we got back */ 1155 if (bo) 1156 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, 1157 &mode_cmd); 1158 else if (surface) 1159 ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, 1160 surface, &vfb, &mode_cmd); 1161 else 1162 BUG(); 1163 1164 err_out: 1165 /* vmw_user_lookup_handle takes one ref so does new_fb */ 1166 if (bo) 1167 vmw_dmabuf_unreference(&bo); 1168 if (surface) 1169 vmw_surface_unreference(&surface); 1170 1171 if (ret) { 1172 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); 1173 ttm_base_object_unref(&user_obj); 1174 return ERR_PTR(ret); 1175 } else 1176 vfb->user_obj = user_obj; 1177 1178 return &vfb->base; 1179 } 1180 1181 static const struct drm_mode_config_funcs vmw_kms_funcs = { 1182 .fb_create = vmw_kms_fb_create, 1183 }; 1184 1185 int vmw_kms_present(struct vmw_private *dev_priv, 1186 struct drm_file *file_priv, 1187 struct vmw_framebuffer *vfb, 1188 struct vmw_surface *surface, 1189 uint32_t sid, 1190 int32_t destX, int32_t destY, 1191 struct drm_vmw_rect *clips, 1192 uint32_t num_clips) 1193 { 1194 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 1195 struct drm_clip_rect *tmp; 1196 struct drm_crtc *crtc; 1197 size_t fifo_size; 1198 int i, k, num_units; 1199 int ret = 0; /* silence warning */ 1200 int left, right, top, bottom; 1201 1202 struct { 1203 SVGA3dCmdHeader header; 1204 SVGA3dCmdBlitSurfaceToScreen body; 1205 } *cmd; 1206 SVGASignedRect *blits; 1207 1208 num_units = 0; 1209 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { 1210 if (crtc->fb != &vfb->base) 1211 continue; 1212 units[num_units++] = vmw_crtc_to_du(crtc); 1213 } 1214 1215 BUG_ON(surface == NULL); 1216 BUG_ON(!clips || !num_clips); 1217 1218 tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL); 1219 if (unlikely(tmp == NULL)) { 1220 DRM_ERROR("Temporary cliprect memory alloc failed.\n"); 1221 return -ENOMEM; 1222 } 1223 1224 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips; 1225 cmd = kmalloc(fifo_size, GFP_KERNEL); 1226 if (unlikely(cmd == NULL)) { 1227 DRM_ERROR("Failed to allocate temporary fifo memory.\n"); 1228 ret = -ENOMEM; 1229 goto out_free_tmp; 1230 } 1231 1232 left = clips->x; 1233 right = clips->x + clips->w; 1234 top = clips->y; 1235 bottom = clips->y + clips->h; 1236 1237 for (i = 1; i < num_clips; i++) { 1238 left = min_t(int, left, (int)clips[i].x); 1239 right = max_t(int, right, (int)clips[i].x + clips[i].w); 1240 top = min_t(int, top, (int)clips[i].y); 1241 bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h); 1242 } 1243 1244 /* only need to do this once */ 1245 memset(cmd, 0, fifo_size); 1246 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN); 1247 1248 blits = (SVGASignedRect *)&cmd[1]; 1249 1250 cmd->body.srcRect.left = left; 1251 cmd->body.srcRect.right = right; 1252 cmd->body.srcRect.top = top; 1253 cmd->body.srcRect.bottom = bottom; 1254 1255 for (i = 0; i < num_clips; i++) { 1256 tmp[i].x1 = clips[i].x - left; 1257 tmp[i].x2 = clips[i].x + clips[i].w - left; 1258 tmp[i].y1 = clips[i].y - top; 1259 tmp[i].y2 = clips[i].y + clips[i].h - top; 1260 } 1261 1262 for (k = 0; k < num_units; k++) { 1263 struct vmw_display_unit *unit = units[k]; 1264 struct vmw_clip_rect clip; 1265 int num; 1266 1267 clip.x1 = left + destX - unit->crtc.x; 1268 clip.y1 = 
top + destY - unit->crtc.y; 1269 clip.x2 = right + destX - unit->crtc.x; 1270 clip.y2 = bottom + destY - unit->crtc.y; 1271 1272 /* skip any crtcs that misses the clip region */ 1273 if (clip.x1 >= unit->crtc.mode.hdisplay || 1274 clip.y1 >= unit->crtc.mode.vdisplay || 1275 clip.x2 <= 0 || clip.y2 <= 0) 1276 continue; 1277 1278 /* 1279 * In order for the clip rects to be correctly scaled 1280 * the src and dest rects needs to be the same size. 1281 */ 1282 cmd->body.destRect.left = clip.x1; 1283 cmd->body.destRect.right = clip.x2; 1284 cmd->body.destRect.top = clip.y1; 1285 cmd->body.destRect.bottom = clip.y2; 1286 1287 /* create a clip rect of the crtc in dest coords */ 1288 clip.x2 = unit->crtc.mode.hdisplay - clip.x1; 1289 clip.y2 = unit->crtc.mode.vdisplay - clip.y1; 1290 clip.x1 = 0 - clip.x1; 1291 clip.y1 = 0 - clip.y1; 1292 1293 /* need to reset sid as it is changed by execbuf */ 1294 cmd->body.srcImage.sid = sid; 1295 cmd->body.destScreenId = unit->unit; 1296 1297 /* clip and write blits to cmd stream */ 1298 vmw_clip_cliprects(tmp, num_clips, clip, blits, &num); 1299 1300 /* if no cliprects hit skip this */ 1301 if (num == 0) 1302 continue; 1303 1304 /* recalculate package length */ 1305 fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num; 1306 cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header)); 1307 ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, 1308 fifo_size, 0, NULL, NULL); 1309 1310 if (unlikely(ret != 0)) 1311 break; 1312 } 1313 1314 kfree(cmd); 1315 out_free_tmp: 1316 kfree(tmp); 1317 1318 return ret; 1319 } 1320 1321 int vmw_kms_readback(struct vmw_private *dev_priv, 1322 struct drm_file *file_priv, 1323 struct vmw_framebuffer *vfb, 1324 struct drm_vmw_fence_rep __user *user_fence_rep, 1325 struct drm_vmw_rect *clips, 1326 uint32_t num_clips) 1327 { 1328 struct vmw_framebuffer_dmabuf *vfbd = 1329 vmw_framebuffer_to_vfbd(&vfb->base); 1330 struct vmw_dma_buffer *dmabuf = vfbd->buffer; 1331 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS]; 1332 struct drm_crtc *crtc; 1333 size_t fifo_size; 1334 int i, k, ret, num_units, blits_pos; 1335 1336 struct { 1337 uint32_t header; 1338 SVGAFifoCmdDefineGMRFB body; 1339 } *cmd; 1340 struct { 1341 uint32_t header; 1342 SVGAFifoCmdBlitScreenToGMRFB body; 1343 } *blits; 1344 1345 num_units = 0; 1346 list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) { 1347 if (crtc->fb != &vfb->base) 1348 continue; 1349 units[num_units++] = vmw_crtc_to_du(crtc); 1350 } 1351 1352 BUG_ON(dmabuf == NULL); 1353 BUG_ON(!clips || !num_clips); 1354 1355 /* take a safe guess at fifo size */ 1356 fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units; 1357 cmd = kmalloc(fifo_size, GFP_KERNEL); 1358 if (unlikely(cmd == NULL)) { 1359 DRM_ERROR("Failed to allocate temporary fifo memory.\n"); 1360 return -ENOMEM; 1361 } 1362 1363 memset(cmd, 0, fifo_size); 1364 cmd->header = SVGA_CMD_DEFINE_GMRFB; 1365 cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel; 1366 cmd->body.format.colorDepth = vfb->base.depth; 1367 cmd->body.format.reserved = 0; 1368 cmd->body.bytesPerLine = vfb->base.pitches[0]; 1369 cmd->body.ptr.gmrId = vfb->user_handle; 1370 cmd->body.ptr.offset = 0; 1371 1372 blits = (void *)&cmd[1]; 1373 blits_pos = 0; 1374 for (i = 0; i < num_units; i++) { 1375 struct drm_vmw_rect *c = clips; 1376 for (k = 0; k < num_clips; k++, c++) { 1377 /* transform clip coords to crtc origin based coords */ 1378 int clip_x1 = c->x - units[i]->crtc.x; 1379 int clip_x2 = c->x - units[i]->crtc.x + c->w; 1380 
			int clip_y1 = c->y - units[i]->crtc.y;
			int clip_y2 = c->y - units[i]->crtc.y + c->h;
			int dest_x = c->x;
			int dest_y = c->y;

			/* compensate for clipping: we negate
			 * a negative number and add that.
			 */
			if (clip_x1 < 0)
				dest_x += -clip_x1;
			if (clip_y1 < 0)
				dest_y += -clip_y1;

			/* clip */
			clip_x1 = max(clip_x1, 0);
			clip_y1 = max(clip_y1, 0);
			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);

			/* and cull any rects that miss the crtc */
			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
			blits[blits_pos].body.srcScreenId = units[i]->unit;
			blits[blits_pos].body.destOrigin.x = dest_x;
			blits[blits_pos].body.destOrigin.y = dest_y;

			blits[blits_pos].body.srcRect.left = clip_x1;
			blits[blits_pos].body.srcRect.top = clip_y1;
			blits[blits_pos].body.srcRect.right = clip_x2;
			blits[blits_pos].body.srcRect.bottom = clip_y2;
			blits_pos++;
		}
	}
	/* reset size here and use calculated exact size from loops */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
				  0, user_fence_rep, NULL);

	kfree(cmd);

	return ret;
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_screen_object_display(dev_priv);
	if (ret) /* Fallback */
		(void)vmw_kms_init_legacy_display_system(dev_priv);

	return 0;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * The docs say we should take the lock before calling this function,
	 * but since it destroys encoders, and our destructor calls
	 * drm_encoder_cleanup() which takes the lock, we would deadlock.
1454 */ 1455 drm_mode_config_cleanup(dev_priv->dev); 1456 if (dev_priv->sou_priv) 1457 vmw_kms_close_screen_object_display(dev_priv); 1458 else 1459 vmw_kms_close_legacy_display_system(dev_priv); 1460 return 0; 1461 } 1462 1463 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, 1464 struct drm_file *file_priv) 1465 { 1466 struct drm_vmw_cursor_bypass_arg *arg = data; 1467 struct vmw_display_unit *du; 1468 struct drm_mode_object *obj; 1469 struct drm_crtc *crtc; 1470 int ret = 0; 1471 1472 1473 mutex_lock(&dev->mode_config.mutex); 1474 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { 1475 1476 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1477 du = vmw_crtc_to_du(crtc); 1478 du->hotspot_x = arg->xhot; 1479 du->hotspot_y = arg->yhot; 1480 } 1481 1482 mutex_unlock(&dev->mode_config.mutex); 1483 return 0; 1484 } 1485 1486 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); 1487 if (!obj) { 1488 ret = -EINVAL; 1489 goto out; 1490 } 1491 1492 crtc = obj_to_crtc(obj); 1493 du = vmw_crtc_to_du(crtc); 1494 1495 du->hotspot_x = arg->xhot; 1496 du->hotspot_y = arg->yhot; 1497 1498 out: 1499 mutex_unlock(&dev->mode_config.mutex); 1500 1501 return ret; 1502 } 1503 1504 int vmw_kms_write_svga(struct vmw_private *vmw_priv, 1505 unsigned width, unsigned height, unsigned pitch, 1506 unsigned bpp, unsigned depth) 1507 { 1508 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) 1509 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); 1510 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1511 iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); 1512 vmw_write(vmw_priv, SVGA_REG_WIDTH, width); 1513 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); 1514 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); 1515 1516 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) { 1517 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n", 1518 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH)); 1519 return -EINVAL; 1520 } 1521 1522 return 0; 1523 } 1524 1525 int vmw_kms_save_vga(struct vmw_private *vmw_priv) 1526 { 1527 struct vmw_vga_topology_state *save; 1528 uint32_t i; 1529 1530 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); 1531 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); 1532 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); 1533 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) 1534 vmw_priv->vga_pitchlock = 1535 vmw_read(vmw_priv, SVGA_REG_PITCHLOCK); 1536 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1537 vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt + 1538 SVGA_FIFO_PITCHLOCK); 1539 1540 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) 1541 return 0; 1542 1543 vmw_priv->num_displays = vmw_read(vmw_priv, 1544 SVGA_REG_NUM_GUEST_DISPLAYS); 1545 1546 if (vmw_priv->num_displays == 0) 1547 vmw_priv->num_displays = 1; 1548 1549 for (i = 0; i < vmw_priv->num_displays; ++i) { 1550 save = &vmw_priv->vga_save[i]; 1551 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); 1552 save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY); 1553 save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X); 1554 save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y); 1555 save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); 1556 save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); 1557 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 1558 if (i == 0 && vmw_priv->num_displays == 1 && 1559 save->width == 0 && save->height == 0) { 1560 1561 /* 1562 * It should be fairly safe to assume that these 1563 * values are 
uninitialized. 1564 */ 1565 1566 save->width = vmw_priv->vga_width - save->pos_x; 1567 save->height = vmw_priv->vga_height - save->pos_y; 1568 } 1569 } 1570 1571 return 0; 1572 } 1573 1574 int vmw_kms_restore_vga(struct vmw_private *vmw_priv) 1575 { 1576 struct vmw_vga_topology_state *save; 1577 uint32_t i; 1578 1579 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); 1580 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); 1581 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); 1582 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) 1583 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, 1584 vmw_priv->vga_pitchlock); 1585 else if (vmw_fifo_have_pitchlock(vmw_priv)) 1586 iowrite32(vmw_priv->vga_pitchlock, 1587 vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK); 1588 1589 if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) 1590 return 0; 1591 1592 for (i = 0; i < vmw_priv->num_displays; ++i) { 1593 save = &vmw_priv->vga_save[i]; 1594 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i); 1595 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary); 1596 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x); 1597 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y); 1598 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width); 1599 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height); 1600 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 1601 } 1602 1603 return 0; 1604 } 1605 1606 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, 1607 uint32_t pitch, 1608 uint32_t height) 1609 { 1610 return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size; 1611 } 1612 1613 1614 /** 1615 * Function called by DRM code called with vbl_lock held. 1616 */ 1617 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) 1618 { 1619 return 0; 1620 } 1621 1622 /** 1623 * Function called by DRM code called with vbl_lock held. 1624 */ 1625 int vmw_enable_vblank(struct drm_device *dev, int crtc) 1626 { 1627 return -ENOSYS; 1628 } 1629 1630 /** 1631 * Function called by DRM code called with vbl_lock held. 1632 */ 1633 void vmw_disable_vblank(struct drm_device *dev, int crtc) 1634 { 1635 } 1636 1637 1638 /* 1639 * Small shared kms functions. 
1640 */ 1641 1642 int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, 1643 struct drm_vmw_rect *rects) 1644 { 1645 struct drm_device *dev = dev_priv->dev; 1646 struct vmw_display_unit *du; 1647 struct drm_connector *con; 1648 1649 mutex_lock(&dev->mode_config.mutex); 1650 1651 #if 0 1652 { 1653 unsigned int i; 1654 1655 DRM_INFO("%s: new layout ", __func__); 1656 for (i = 0; i < num; i++) 1657 DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y, 1658 rects[i].w, rects[i].h); 1659 DRM_INFO("\n"); 1660 } 1661 #endif 1662 1663 list_for_each_entry(con, &dev->mode_config.connector_list, head) { 1664 du = vmw_connector_to_du(con); 1665 if (num > du->unit) { 1666 du->pref_width = rects[du->unit].w; 1667 du->pref_height = rects[du->unit].h; 1668 du->pref_active = true; 1669 du->gui_x = rects[du->unit].x; 1670 du->gui_y = rects[du->unit].y; 1671 } else { 1672 du->pref_width = 800; 1673 du->pref_height = 600; 1674 du->pref_active = false; 1675 } 1676 con->status = vmw_du_connector_detect(con, true); 1677 } 1678 1679 mutex_unlock(&dev->mode_config.mutex); 1680 1681 return 0; 1682 } 1683 1684 int vmw_du_page_flip(struct drm_crtc *crtc, 1685 struct drm_framebuffer *fb, 1686 struct drm_pending_vblank_event *event) 1687 { 1688 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1689 struct drm_framebuffer *old_fb = crtc->fb; 1690 struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); 1691 struct drm_file *file_priv = event->base.file_priv; 1692 struct vmw_fence_obj *fence = NULL; 1693 struct drm_clip_rect clips; 1694 int ret; 1695 1696 /* require ScreenObject support for page flipping */ 1697 if (!dev_priv->sou_priv) 1698 return -ENOSYS; 1699 1700 if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) 1701 return -EINVAL; 1702 1703 crtc->fb = fb; 1704 1705 /* do a full screen dirty update */ 1706 clips.x1 = clips.y1 = 0; 1707 clips.x2 = fb->width; 1708 clips.y2 = fb->height; 1709 1710 if (vfb->dmabuf) 1711 ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb, 1712 0, 0, &clips, 1, 1, &fence); 1713 else 1714 ret = do_surface_dirty_sou(dev_priv, file_priv, vfb, 1715 0, 0, &clips, 1, 1, &fence); 1716 1717 1718 if (ret != 0) 1719 goto out_no_fence; 1720 if (!fence) { 1721 ret = -EINVAL; 1722 goto out_no_fence; 1723 } 1724 1725 ret = vmw_event_fence_action_queue(file_priv, fence, 1726 &event->base, 1727 &event->event.tv_sec, 1728 &event->event.tv_usec, 1729 true); 1730 1731 /* 1732 * No need to hold on to this now. The only cleanup 1733 * we need to do if we fail is unref the fence. 
1734 */ 1735 vmw_fence_obj_unreference(&fence); 1736 1737 if (vmw_crtc_to_du(crtc)->is_implicit) 1738 vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc); 1739 1740 return ret; 1741 1742 out_no_fence: 1743 crtc->fb = old_fb; 1744 return ret; 1745 } 1746 1747 1748 void vmw_du_crtc_save(struct drm_crtc *crtc) 1749 { 1750 } 1751 1752 void vmw_du_crtc_restore(struct drm_crtc *crtc) 1753 { 1754 } 1755 1756 void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 1757 u16 *r, u16 *g, u16 *b, 1758 uint32_t start, uint32_t size) 1759 { 1760 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1761 int i; 1762 1763 for (i = 0; i < size; i++) { 1764 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i, 1765 r[i], g[i], b[i]); 1766 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8); 1767 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); 1768 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); 1769 } 1770 } 1771 1772 void vmw_du_connector_dpms(struct drm_connector *connector, int mode) 1773 { 1774 } 1775 1776 void vmw_du_connector_save(struct drm_connector *connector) 1777 { 1778 } 1779 1780 void vmw_du_connector_restore(struct drm_connector *connector) 1781 { 1782 } 1783 1784 enum drm_connector_status 1785 vmw_du_connector_detect(struct drm_connector *connector, bool force) 1786 { 1787 uint32_t num_displays; 1788 struct drm_device *dev = connector->dev; 1789 struct vmw_private *dev_priv = vmw_priv(dev); 1790 struct vmw_display_unit *du = vmw_connector_to_du(connector); 1791 1792 mutex_lock(&dev_priv->hw_mutex); 1793 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); 1794 mutex_unlock(&dev_priv->hw_mutex); 1795 1796 return ((vmw_connector_to_du(connector)->unit < num_displays && 1797 du->pref_active) ? 1798 connector_status_connected : connector_status_disconnected); 1799 } 1800 1801 static struct drm_display_mode vmw_kms_connector_builtin[] = { 1802 /* 640x480@60Hz */ 1803 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 1804 752, 800, 0, 480, 489, 492, 525, 0, 1805 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 1806 /* 800x600@60Hz */ 1807 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, 1808 968, 1056, 0, 600, 601, 605, 628, 0, 1809 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 1810 /* 1024x768@60Hz */ 1811 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, 1812 1184, 1344, 0, 768, 771, 777, 806, 0, 1813 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, 1814 /* 1152x864@75Hz */ 1815 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1816 1344, 1600, 0, 864, 865, 868, 900, 0, 1817 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 1818 /* 1280x768@60Hz */ 1819 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1820 1472, 1664, 0, 768, 771, 778, 798, 0, 1821 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 1822 /* 1280x800@60Hz */ 1823 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, 1824 1480, 1680, 0, 800, 803, 809, 831, 0, 1825 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, 1826 /* 1280x960@60Hz */ 1827 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, 1828 1488, 1800, 0, 960, 961, 964, 1000, 0, 1829 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 1830 /* 1280x1024@60Hz */ 1831 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, 1832 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, 1833 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, 1834 /* 1360x768@60Hz */ 1835 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, 1836 1536, 1792, 0, 768, 771, 777, 
		   795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}


int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;

	/* Add preferred mode */
	{
		mode = drm_mode_duplicate(dev, &prefmode);
		if (!mode)
			return 0;
		mode->hdisplay = du->pref_width;
		mode->vdisplay = du->pref_height;
		vmw_guess_mode_timing(mode);

		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
					       mode->vdisplay)) {
			drm_mode_probed_add(connector, mode);
		} else {
			drm_mode_destroy(dev, mode);
			mode = NULL;
		}

		if (du->pref_mode) {
			list_del_init(&du->pref_mode->head);
			drm_mode_destroy(dev, du->pref_mode);
		}

		/* mode might be null here, this is intended */
		du->pref_mode = mode;
	}

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	/* Move the preferred mode first, to help apps pick the right mode.
*/ 1960 if (du->pref_mode) 1961 list_move(&du->pref_mode->head, &connector->probed_modes); 1962 1963 drm_mode_connector_list_update(connector); 1964 1965 return 1; 1966 } 1967 1968 int vmw_du_connector_set_property(struct drm_connector *connector, 1969 struct drm_property *property, 1970 uint64_t val) 1971 { 1972 return 0; 1973 } 1974 1975 1976 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 1977 struct drm_file *file_priv) 1978 { 1979 struct vmw_private *dev_priv = vmw_priv(dev); 1980 struct drm_vmw_update_layout_arg *arg = 1981 (struct drm_vmw_update_layout_arg *)data; 1982 struct vmw_master *vmaster = vmw_master(file_priv->master); 1983 void __user *user_rects; 1984 struct drm_vmw_rect *rects; 1985 unsigned rects_size; 1986 int ret; 1987 int i; 1988 struct drm_mode_config *mode_config = &dev->mode_config; 1989 1990 ret = ttm_read_lock(&vmaster->lock, true); 1991 if (unlikely(ret != 0)) 1992 return ret; 1993 1994 if (!arg->num_outputs) { 1995 struct drm_vmw_rect def_rect = {0, 0, 800, 600}; 1996 vmw_du_update_layout(dev_priv, 1, &def_rect); 1997 goto out_unlock; 1998 } 1999 2000 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); 2001 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), 2002 GFP_KERNEL); 2003 if (unlikely(!rects)) { 2004 ret = -ENOMEM; 2005 goto out_unlock; 2006 } 2007 2008 user_rects = (void __user *)(unsigned long)arg->rects; 2009 ret = copy_from_user(rects, user_rects, rects_size); 2010 if (unlikely(ret != 0)) { 2011 DRM_ERROR("Failed to get rects.\n"); 2012 ret = -EFAULT; 2013 goto out_free; 2014 } 2015 2016 for (i = 0; i < arg->num_outputs; ++i) { 2017 if (rects[i].x < 0 || 2018 rects[i].y < 0 || 2019 rects[i].x + rects[i].w > mode_config->max_width || 2020 rects[i].y + rects[i].h > mode_config->max_height) { 2021 DRM_ERROR("Invalid GUI layout.\n"); 2022 ret = -EINVAL; 2023 goto out_free; 2024 } 2025 } 2026 2027 vmw_du_update_layout(dev_priv, arg->num_outputs, rects); 2028 2029 out_free: 2030 kfree(rects); 2031 out_unlock: 2032 ttm_read_unlock(&vmaster->lock); 2033 return ret; 2034 } 2035