// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>

void vmw_du_cleanup(struct vmw_display_unit *du)
{
	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

	drm_plane_cleanup(&du->primary);
	if (vmw_cmd_supported(dev_priv))
		drm_plane_cleanup(&du->cursor.base);

	drm_connector_unregister(&du->connector);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
	u32 cmd;
	SVGAFifoCmdDefineAlphaCursor cursor;
};

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
				       u32 *image, u32 width, u32 height,
				       u32 hotspotX, u32 hotspotY)
{
	struct vmw_svga_fifo_cmd_define_cursor *cmd;
	const u32 image_size = width * height * sizeof(*image);
	const u32 cmd_size = sizeof(*cmd) + image_size;

	/*
	 * Try to reserve fifocmd space and swallow any failures;
	 * such reservations cannot be left unconsumed for long
	 * under the risk of clogging other fifocmd users, so
	 * we treat reservations separately from the way we treat
	 * other fallible KMS-atomic resources at prepare_fb.
	 */
	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
	if (unlikely(!cmd))
		return;

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
	cmd->cursor.id = 0;
	cmd->cursor.width = width;
	cmd->cursor.height = height;
	cmd->cursor.hotspotX = hotspotX;
	cmd->cursor.hotspotY = hotspotY;

	vmw_cmd_commit_flush(dev_priv, cmd_size);
}
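/*
 * Layout of the packet built by vmw_send_define_cursor_cmd() above: a u32
 * command id followed by SVGAFifoCmdDefineAlphaCursor, with the raw 32-bit
 * ARGB pixel data appended directly after the struct (hence the memcpy to
 * &cmd[1]). For example, a 64x64 cursor carries
 * image_size = 64 * 64 * 4 = 16384 bytes of payload on top of sizeof(*cmd).
 */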
/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
				    struct vmw_plane_state *vps,
				    u32 *image, u32 width, u32 height,
				    u32 hotspotX, u32 hotspotY)
{
	if (vps->cursor.bo)
		vmw_cursor_update_mob(dev_priv, vps, image,
				      vps->base.crtc_w, vps->base.crtc_h,
				      hotspotX, hotspotY);
	else
		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
					   hotspotX, hotspotY);
}

/**
 * vmw_cursor_update_mob - Update the cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
				  struct vmw_plane_state *vps,
				  u32 *image, u32 width, u32 height,
				  u32 hotspotX, u32 hotspotY)
{
	SVGAGBCursorHeader *header;
	SVGAGBAlphaCursorHeader *alpha_header;
	const u32 image_size = width * height * sizeof(*image);

	header = vmw_bo_map_and_cache(vps->cursor.bo);
	alpha_header = &header->header.alphaHeader;

	memset(header, 0, sizeof(*header));

	header->type = SVGA_ALPHA_CURSOR;
	header->sizeInBytes = image_size;

	alpha_header->hotspotX = hotspotX;
	alpha_header->hotspotY = hotspotY;
	alpha_header->width = width;
	alpha_header->height = height;

	memcpy(header + 1, image, image_size);
	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
		  vps->cursor.bo->tbo.resource->start);
}

static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}
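/*
 * In-memory layout of a cursor MOB, as written by vmw_cursor_update_mob()
 * above: an SVGAGBCursorHeader (whose alpha sub-header carries the hotspot
 * and dimensions) followed directly by the 32-bit pixel data, so
 * vmw_du_cursor_mob_size(64, 64) = 64 * 64 * 4 + sizeof(SVGAGBCursorHeader)
 * bytes. Writing the MOB id to SVGA_REG_CURSOR_MOBID is what points the
 * device at this buffer.
 */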
/**
 * vmw_du_cursor_plane_acquire_image - Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
	bool is_iomem;

	if (vps->surf) {
		if (vps->surf_mapped)
			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		return vps->surf->snooper.image;
	} else if (vps->bo) {
		return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
	}
	return NULL;
}

static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
					    struct vmw_plane_state *new_vps)
{
	void *old_image;
	void *new_image;
	u32 size;
	bool changed;

	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
	    old_vps->base.crtc_h != new_vps->base.crtc_h)
		return true;

	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
		return true;

	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
	new_image = vmw_du_cursor_plane_acquire_image(new_vps);

	changed = false;
	if (old_image && new_image)
		changed = memcmp(old_image, new_image, size) != 0;

	return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
	if (!(*vbo))
		return;

	ttm_bo_unpin(&(*vbo)->tbo);
	vmw_bo_unreference(vbo);
}

static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
				  struct vmw_plane_state *vps)
{
	u32 i;

	if (!vps->cursor.bo)
		return;

	vmw_du_cursor_plane_unmap_cm(vps);

	/* Look for a free slot to return this mob to the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (!vcp->cursor_mobs[i]) {
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Cache is full: See if this mob is bigger than an existing mob. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i]->tbo.base.size <
		    vps->cursor.bo->tbo.base.size) {
			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
			vcp->cursor_mobs[i] = vps->cursor.bo;
			vps->cursor.bo = NULL;
			return;
		}
	}

	/* Destroy it if it's not worth caching. */
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}

static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
				 struct vmw_plane_state *vps)
{
	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	u32 i;
	u32 cursor_max_dim, mob_max_size;
	int ret;

	if (!dev_priv->has_mob ||
	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
		return -EINVAL;

	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
	    vps->base.crtc_h > cursor_max_dim)
		return -EINVAL;

	if (vps->cursor.bo) {
		if (vps->cursor.bo->tbo.base.size >= size)
			return 0;
		vmw_du_put_cursor_mob(vcp, vps);
	}

	/* Look for an unused mob in the cache. */
	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
		if (vcp->cursor_mobs[i] &&
		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
			vps->cursor.bo = vcp->cursor_mobs[i];
			vcp->cursor_mobs[i] = NULL;
			return 0;
		}
	}

	/* Create a new mob if we can't find an existing one. */
	ret = vmw_bo_create_and_populate(dev_priv, size,
					 VMW_BO_DOMAIN_MOB,
					 &vps->cursor.bo);
	if (ret != 0)
		return ret;

	/* Fence the mob creation so we are guaranteed to have the mob */
	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
	if (ret != 0)
		goto teardown;

	vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
	ttm_bo_unreserve(&vps->cursor.bo->tbo);
	return 0;

teardown:
	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
	return ret;
}
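/*
 * The two helpers above implement a small per-plane MOB cache:
 * vmw_du_get_cursor_mob() first tries to reuse a cached buffer that is
 * large enough and only then allocates (and fences) a fresh one, while
 * vmw_du_put_cursor_mob() returns a buffer to a free slot, evicts a
 * smaller cached entry in its favor, or destroys it outright. This way
 * frequent cursor updates don't have to allocate a new MOB each time.
 */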
static void vmw_cursor_update_position(struct vmw_private *dev_priv,
				       bool show, int x, int y)
{
	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
					     : SVGA_CURSOR_ON_HIDE;
	uint32_t count;

	spin_lock(&dev_priv->cursor_lock);
	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
	} else {
		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
	}
	spin_unlock(&dev_priv->cursor_lock);
}

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned int box_count;
	void *virtual;
	bool is_iomem;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;
	const struct SVGA3dSurfaceDesc *desc =
		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed, nothing to copy */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1    || box_count != 1 ||
	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more than one copy */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
		memcpy(srf->snooper.image, virtual,
		       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
	} else {
		/* The source pitch differs; copy the image line by line. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * image_pitch,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * desc->pitchBytesPerBlock);
	}

	srf->snooper.age++;

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
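/*
 * Cursor snooping, in short: legacy cursor surfaces are updated by the
 * guest through SURFACE_DMA, so vmw_kms_cursor_snoop() above keeps a CPU
 * copy of the last cursor image in srf->snooper.image and bumps
 * snooper.age. vmw_kms_cursor_post_execbuf() below compares that age
 * against the per-crtc cursor_age and re-sends the define-cursor command
 * when the snooped image has changed.
 */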
/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	drm_modeset_lock_all(dev);
	drm_for_each_crtc(crtc, dev) {
		du = vmw_crtc_to_du(crtc);

		du->hotspot_x = 0;
		du->hotspot_y = 0;
	}
	drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age ||
		    !du->cursor_surface->snooper.image)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_send_define_cursor_cmd(dev_priv,
					   du->cursor_surface->snooper.image,
					   VMW_CURSOR_SNOOP_WIDTH,
					   VMW_CURSOR_SNOOP_HEIGHT,
					   du->hotspot_x + du->core_hotspot_x,
					   du->hotspot_y + du->core_hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	u32 i;

	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

	drm_plane_cleanup(plane);
}

void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);

	/* Planes are static in our case so we don't free it */
}

/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
			     bool unreference)
{
	if (vps->surf) {
		if (vps->pinned) {
			vmw_resource_unpin(&vps->surf->res);
			vps->pinned--;
		}

		if (unreference) {
			if (vps->pinned)
				DRM_ERROR("Surface still pinned\n");
			vmw_surface_unreference(&vps->surf);
		}
	}
}

/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
			struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_du_plane_unpin_surf(vps, false);
}
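/*
 * A note on pinning: vps->pinned counts how many times the display surface
 * backing this plane state has been pinned. The cleanup_fb paths call
 * vmw_du_plane_unpin_surf() with unreference == false because the
 * reference itself is dropped later, when the plane state is destroyed in
 * vmw_du_plane_destroy_state().
 */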
/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mob.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
	int ret;
	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
	struct ttm_buffer_object *bo;

	if (!vps->cursor.bo)
		return -EINVAL;

	bo = &vps->cursor.bo->tbo;

	if (bo->base.size < size)
		return -EINVAL;

	if (vps->cursor.bo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, NULL);
	if (unlikely(ret != 0))
		return -ENOMEM;

	vmw_bo_map_and_cache(vps->cursor.bo);

	ttm_bo_unreserve(bo);

	return 0;
}

/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mob.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
	int ret = 0;
	struct vmw_bo *vbo = vps->cursor.bo;

	if (!vbo || !vbo->map.virtual)
		return 0;

	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
	if (likely(ret == 0)) {
		vmw_bo_unmap(vbo);
		ttm_bo_unreserve(&vbo->tbo);
	}

	return ret;
}

/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
			       struct drm_plane_state *old_state)
{
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	bool is_iomem;

	if (vps->surf_mapped) {
		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
		vps->surf_mapped = false;
	}

	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
		const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);

		if (likely(ret == 0)) {
			ttm_bo_kunmap(&vps->bo->map);
			ttm_bo_unreserve(&vps->bo->tbo);
		}
	}

	vmw_du_cursor_plane_unmap_cm(vps);
	vmw_du_put_cursor_mob(vcp, vps);

	vmw_du_plane_unpin_surf(vps, false);

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}
}
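/*
 * prepare_fb and cleanup_fb are paired: vmw_du_cursor_plane_prepare_fb()
 * below takes a reference on the new fb's surface or buffer object, maps
 * it, and acquires a cursor MOB from the plane's cache, while
 * vmw_du_cursor_plane_cleanup_fb() above drops the mappings and references
 * and returns the MOB to the cache via vmw_du_put_cursor_mob() instead of
 * freeing it.
 */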
/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	struct drm_framebuffer *fb = new_state->fb;
	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	int ret = 0;

	if (vps->surf) {
		vmw_surface_unreference(&vps->surf);
		vps->surf = NULL;
	}

	if (vps->bo) {
		vmw_bo_unreference(&vps->bo);
		vps->bo = NULL;
	}

	if (fb) {
		if (vmw_framebuffer_to_vfb(fb)->bo) {
			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
			vmw_bo_reference(vps->bo);
		} else {
			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
			vmw_surface_reference(vps->surf);
		}
	}

	if (!vps->surf && vps->bo) {
		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

		/*
		 * Not using vmw_bo_map_and_cache() helper here as we need to
		 * reserve the ttm_buffer_object first which
		 * vmw_bo_map_and_cache() omits.
		 */
		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;

		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

		ttm_bo_unreserve(&vps->bo->tbo);

		if (unlikely(ret != 0))
			return -ENOMEM;
	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
		WARN_ON(vps->surf->snooper.image);
		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
				     NULL);
		if (unlikely(ret != 0))
			return -ENOMEM;
		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
		vps->surf_mapped = true;
	}

	if (vps->surf || vps->bo) {
		vmw_du_get_cursor_mob(vcp, vps);
		vmw_du_cursor_plane_map_cm(vps);
	}

	return 0;
}

void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
	s32 hotspot_x, hotspot_y;

	hotspot_x = du->hotspot_x;
	hotspot_y = du->hotspot_y;

	if (new_state->fb) {
		hotspot_x += new_state->fb->hot_x;
		hotspot_y += new_state->fb->hot_y;
	}

	du->cursor_surface = vps->surf;
	du->cursor_bo = vps->bo;

	if (!vps->surf && !vps->bo) {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return;
	}

	vps->cursor.hotspot_x = hotspot_x;
	vps->cursor.hotspot_y = hotspot_y;

	if (vps->surf)
		du->cursor_age = du->cursor_surface->snooper.age;

	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
		/*
		 * If it hasn't changed, avoid making the device do extra
		 * work by keeping the old cursor active.
		 */
		struct vmw_cursor_plane_state tmp = old_vps->cursor;

		old_vps->cursor = vps->cursor;
		vps->cursor = tmp;
	} else {
		void *image = vmw_du_cursor_plane_acquire_image(vps);

		if (image)
			vmw_cursor_update_image(dev_priv, vps, image,
						new_state->crtc_w,
						new_state->crtc_h,
						hotspot_x, hotspot_y);
	}

	du->cursor_x = new_state->crtc_x + du->set_gui_x;
	du->cursor_y = new_state->crtc_y + du->set_gui_y;

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + hotspot_x,
				   du->cursor_y + hotspot_y);

	du->core_hotspot_x = hotspot_x - du->hotspot_x;
	du->core_hotspot_y = hotspot_y - du->hotspot_y;
}
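/*
 * Note the swap in the unchanged-cursor path above: by exchanging
 * old_vps->cursor and vps->cursor, the MOB the device is currently
 * scanning out stays attached to the new state, while the freshly
 * prepared (but unused) MOB travels with the old state, where cleanup_fb
 * returns it to the plane's cache.
 */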
/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about the crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_framebuffer *new_fb = new_state->fb;
	int ret;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);

	if (!ret && new_fb) {
		struct drm_crtc *crtc = new_state->crtc;
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		vmw_connector_state_to_vcs(du->connector.state);
	}

	return ret;
}

/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	int ret = 0;
	struct drm_crtc_state *crtc_state = NULL;
	struct vmw_surface *surface = NULL;
	struct drm_framebuffer *fb = new_state->fb;

	if (new_state->crtc)
		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
							   new_state->crtc);

	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* Turning off */
	if (!fb)
		return 0;

	/* A lot of the code assumes this */
	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
			  new_state->crtc_w, new_state->crtc_h);
		return -EINVAL;
	}

	if (!vmw_framebuffer_to_vfb(fb)->bo) {
		surface = vmw_framebuffer_to_vfbs(fb)->surface;

		WARN_ON(!surface);

		if (!surface ||
		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
			DRM_ERROR("surface not suitable for cursor\n");
			return -EINVAL;
		}
	}

	return 0;
}
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
									 crtc);
	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
	int connector_mask = drm_connector_mask(&du->connector);
	bool has_primary = new_state->plane_mask &
			   drm_plane_mask(crtc->primary);

	/* We always want to have an active plane with an active CRTC */
	if (has_primary != new_state->enable)
		return -EINVAL;

	if (new_state->connector_mask != connector_mask &&
	    new_state->connector_mask != 0) {
		DRM_ERROR("Invalid connectors configuration\n");
		return -EINVAL;
	}

	/*
	 * Our virtual device does not have a dot clock, so use the logical
	 * clock value as the dot clock.
	 */
	if (new_state->mode.crtc_clock == 0)
		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

	return 0;
}

void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}

void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
			      struct drm_atomic_state *state)
{
}

/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state;
	struct vmw_crtc_state *vcs;

	if (WARN_ON(!crtc->state))
		return NULL;

	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_crtc_duplicate_state(crtc, state);

	return state;
}

/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
	struct vmw_crtc_state *vcs;

	if (crtc->state) {
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

		kfree(vmw_crtc_state_to_vcs(crtc->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
		return;
	}

	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}

/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
			  struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}

/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
	struct drm_plane_state *state;
	struct vmw_plane_state *vps;

	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return NULL;

	vps->pinned = 0;
	vps->cpp = 0;

	memset(&vps->cursor, 0, sizeof(vps->cursor));

	/* Each ref counted resource needs to be acquired again */
	if (vps->surf)
		(void) vmw_surface_reference(vps->surf);

	if (vps->bo)
		(void) vmw_bo_reference(vps->bo);

	state = &vps->base;

	__drm_atomic_helper_plane_duplicate_state(plane, state);

	return state;
}
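/*
 * The duplicate/reset/destroy trios in this file all follow the same
 * subclassing pattern for atomic state: kmemdup() the derived vmw state
 * struct, then let the matching __drm_atomic_helper_*_duplicate_state()
 * or *_reset() helper fix up the embedded base struct. Duplication must
 * re-acquire every reference the state holds (surface and buffer object
 * above) and zero per-state fields such as vps->cursor, so that each
 * plane state owns its own cursor MOB and mapping.
 */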
/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
	struct vmw_plane_state *vps;

	if (plane->state)
		vmw_du_plane_destroy_state(plane, plane->state);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps) {
		DRM_ERROR("Cannot allocate vmw_plane_state\n");
		return;
	}

	__drm_atomic_helper_plane_reset(plane, &vps->base);
}

/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

	/* Should have been freed by cleanup_fb */
	if (vps->surf)
		vmw_surface_unreference(&vps->surf);

	if (vps->bo)
		vmw_bo_unreference(&vps->bo);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
	struct drm_connector_state *state;
	struct vmw_connector_state *vcs;

	if (WARN_ON(!connector->state))
		return NULL;

	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
	if (!vcs)
		return NULL;

	state = &vcs->base;

	__drm_atomic_helper_connector_duplicate_state(connector, state);

	return state;
}

/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
	struct vmw_connector_state *vcs;

	if (connector->state) {
		__drm_atomic_helper_connector_destroy_state(connector->state);

		kfree(vmw_connector_state_to_vcs(connector->state));
	}

	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
	if (!vcs) {
		DRM_ERROR("Cannot allocate vmw_connector_state\n");
		return;
	}

	__drm_atomic_helper_connector_reset(connector, &vcs->base);
}
/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
			       struct drm_connector_state *state)
{
	drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *vclips,
		     uint32_t num_clips)
{
	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_object:
		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
					    user_fence_rep, vclips, num_clips,
					    NULL);
	case vmw_du_screen_target:
		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
					     user_fence_rep, NULL, vclips, num_clips,
					     1, NULL);
	default:
		WARN_ONCE(true,
			  "Readback called with invalid display system.\n");
	}

	return -ENOSYS;
}

static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd2
					   *mode_cmd,
					   bool is_bo_proxy)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	int ret;

	/* 3D is only supported on HWv8 and newer hosts */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return -ENOSYS;

	/*
	 * Sanity checks.
	 */

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Surface must be marked as a scanout. */
	if (unlikely(!surface->metadata.scanout))
		return -EINVAL;

	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
		     surface->metadata.num_sizes != 1 ||
		     surface->metadata.base_size.width < mode_cmd->width ||
		     surface->metadata.base_size.height < mode_cmd->height ||
		     surface->metadata.base_size.depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
		format = SVGA3D_A8R8G8B8;
		break;
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		break;
	case DRM_FORMAT_RGB565:
		format = SVGA3D_R5G6B5;
		break;
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_A1R5G5B5;
		break;
	default:
		DRM_ERROR("Invalid pixel format: %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	/*
	 * For DX, surface format validation is done when surface->scanout
	 * is set.
	 */
	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
	vfbs->surface = vmw_surface_reference(surface);
	vfbs->base.user_handle = mode_cmd->handles[0];
	vfbs->is_bo_proxy = is_bo_proxy;

	*out = &vfbs->base;

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_surface_unreference(&surface);
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
					    struct drm_file *file_priv,
					    unsigned int *handle)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(fb);

	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_bo_unreference(&vfbd->buffer);

	kfree(vfbd);
}
static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
				    struct drm_file *file_priv,
				    unsigned int flags, unsigned int color,
				    struct drm_clip_rect *clips,
				    unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_bo *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);
	struct drm_clip_rect norect;
	int ret, increment = 1;

	drm_modeset_lock_all(&dev_priv->drm);

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	switch (dev_priv->active_display_unit) {
	case vmw_du_legacy:
		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
					      clips, num_clips, increment);
		break;
	default:
		ret = -EINVAL;
		WARN_ONCE(true, "Dirty called with invalid display system.\n");
		break;
	}

	vmw_cmd_flush(dev_priv, false);

	drm_modeset_unlock_all(&dev_priv->drm);

	return ret;
}

static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
					struct drm_file *file_priv,
					unsigned int flags, unsigned int color,
					struct drm_clip_rect *clips,
					unsigned int num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	if (dev_priv->active_display_unit == vmw_du_legacy &&
	    vmw_cmd_supported(dev_priv))
		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
						color, clips, num_clips);

	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
					 clips, num_clips);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
	.create_handle = vmw_framebuffer_bo_create_handle,
	.destroy = vmw_framebuffer_bo_destroy,
	.dirty = vmw_framebuffer_bo_dirty_ext,
};
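/*
 * Two dirty paths coexist above: vmw_framebuffer_bo_dirty() programs the
 * legacy display unit directly (and is only reachable when the device
 * actually accepts commands), while every other configuration is routed
 * through drm_atomic_helper_dirtyfb() and handled as ordinary atomic
 * damage.
 */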
/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to the
 * same buffer. This way we can do a surface copy rather than a surface DMA.
 * This is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
			       const struct drm_mode_fb_cmd2 *mode_cmd,
			       struct vmw_bo *bo_mob,
			       struct vmw_surface **srf_out)
{
	struct vmw_surface_metadata metadata = {0};
	uint32_t format;
	struct vmw_resource *res;
	unsigned int bytes_pp;
	int ret;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB8888:
		format = SVGA3D_X8R8G8B8;
		bytes_pp = 4;
		break;

	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB1555:
		format = SVGA3D_R5G6B5;
		bytes_pp = 2;
		break;

	case DRM_FORMAT_C8:
		/* 8-bit palettized. */
		format = SVGA3D_P8;
		bytes_pp = 1;
		break;

	default:
		DRM_ERROR("Invalid framebuffer format %p4cc\n",
			  &mode_cmd->pixel_format);
		return -EINVAL;
	}

	metadata.format = format;
	metadata.mip_levels[0] = 1;
	metadata.num_sizes = 1;
	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
	metadata.base_size.height = mode_cmd->height;
	metadata.base_size.depth = 1;
	metadata.scanout = true;

	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
	if (ret) {
		DRM_ERROR("Failed to allocate proxy content buffer\n");
		return ret;
	}

	res = &(*srf_out)->res;

	/* Reserve and switch the backing mob. */
	mutex_lock(&res->dev_priv->cmdbuf_mutex);
	(void) vmw_resource_reserve(res, false, true);
	vmw_bo_unreference(&res->guest_memory_bo);
	res->guest_memory_bo = vmw_bo_reference(bo_mob);
	res->guest_memory_offset = 0;
	vmw_resource_unreserve(res, false, false, false, NULL, 0);
	mutex_unlock(&res->dev_priv->cmdbuf_mutex);

	return 0;
}
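/*
 * The proxy surface intentionally derives its width from the fb pitch
 * (mode_cmd->pitches[0] / bytes_pp) rather than from mode_cmd->width, so a
 * buffer with padding at the end of each scanline is still fully covered
 * by the surface. The backing MOB is swapped with the resource reserved
 * and under cmdbuf_mutex, so the switch cannot race with command
 * submission.
 */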
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
				      struct vmw_bo *bo,
				      struct vmw_framebuffer **out,
				      const struct drm_mode_fb_cmd2
				      *mode_cmd)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_framebuffer_bo *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitches[0];
	if (unlikely(requested_size > bo->tbo.base.size)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	if (!drm_any_plane_has_format(&dev_priv->drm,
				      mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg(&dev_priv->drm,
			"unsupported pixel format %p4cc / modifier 0x%llx\n",
			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	vfbd->base.base.obj[0] = &bo->tbo.base;
	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
	vfbd->base.bo = true;
	vfbd->buffer = vmw_bo_reference(bo);
	vfbd->base.user_handle = mode_cmd->handles[0];
	*out = &vfbd->base;

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_bo_funcs);
	if (ret)
		goto out_err2;

	return 0;

out_err2:
	vmw_bo_unreference(&bo);
	kfree(vfbd);
out_err1:
	return ret;
}

/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces must not exceed the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
	if (width > dev_priv->texture_max_width ||
	    height > dev_priv->texture_max_height)
		return false;

	return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
			struct vmw_bo *bo,
			struct vmw_surface *surface,
			bool only_2d,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_framebuffer *vfb = NULL;
	bool is_bo_proxy = false;
	int ret;

	/*
	 * We cannot use the SurfaceDMA command in a non-accelerated VM;
	 * therefore, wrap the buffer object in a surface so we can use the
	 * SurfaceCopy command.
	 */
	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
	    bo && only_2d &&
	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
	    dev_priv->active_display_unit == vmw_du_screen_target) {
		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
					  bo, &surface);
		if (ret)
			return ERR_PTR(ret);

		is_bo_proxy = true;
	}

	/* Create the new framebuffer depending on what we have */
	if (surface) {
		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
						      mode_cmd,
						      is_bo_proxy);
		/*
		 * vmw_create_bo_proxy() adds a reference that is no longer
		 * needed
		 */
		if (is_bo_proxy)
			vmw_surface_unreference(&surface);
	} else if (bo) {
		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
						 mode_cmd);
	} else {
		BUG();
	}

	if (ret)
		return ERR_PTR(ret);

	return vfb;
}

/*
 * Generic Kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_bo *bo = NULL;
	int ret;

	/* returns either a bo or surface */
	ret = vmw_user_lookup_handle(dev_priv, file_priv,
				     mode_cmd->handles[0],
				     &surface, &bo);
	if (ret) {
		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
			  mode_cmd->handles[0], mode_cmd->handles[0]);
		goto err_out;
	}

	if (!bo &&
	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
		DRM_ERROR("Surface size cannot exceed %dx%d\n",
			  dev_priv->texture_max_width,
			  dev_priv->texture_max_height);
		ret = -EINVAL;
		goto err_out;
	}

	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
				      !(dev_priv->capabilities & SVGA_CAP_3D),
				      mode_cmd);
	if (IS_ERR(vfb)) {
		ret = PTR_ERR(vfb);
		goto err_out;
	}

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo) {
		vmw_bo_unreference(&bo);
		drm_gem_object_put(&bo->tbo.base);
	}
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;
}
/**
 * vmw_kms_check_display_memory - Validates the display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
					uint32_t num_rects,
					struct drm_rect *rects)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_rect bounding_box = {0};
	u64 total_pixels = 0, pixel_mem, bb_mem;
	int i;

	for (i = 0; i < num_rects; i++) {
		/*
		 * For STDU, only the individual screen (screen target) is
		 * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
		 */
		if (dev_priv->active_display_unit == vmw_du_screen_target &&
		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
			VMW_DEBUG_KMS("Screen size not supported.\n");
			return -EINVAL;
		}

		/* Bounding box upper left is at (0,0). */
		if (rects[i].x2 > bounding_box.x2)
			bounding_box.x2 = rects[i].x2;

		if (rects[i].y2 > bounding_box.y2)
			bounding_box.y2 = rects[i].y2;

		total_pixels += (u64) drm_rect_width(&rects[i]) *
				(u64) drm_rect_height(&rects[i]);
	}

	/* Virtual svga device primary limits are always in 32-bpp. */
	pixel_mem = total_pixels * 4;

	/*
	 * For HV10 and below, prim_bb_mem is the VRAM size. When
	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the VRAM
	 * size is the limit on the primary bounding box.
	 */
	if (pixel_mem > dev_priv->max_primary_mem) {
		VMW_DEBUG_KMS("Combined output size too large.\n");
		return -EINVAL;
	}

	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
	if (dev_priv->active_display_unit != vmw_du_screen_target ||
	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

		if (bb_mem > dev_priv->max_primary_mem) {
			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
			return -EINVAL;
		}
	}

	return 0;
}
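/*
 * A worked example of the check above, assuming two 1920x1080 outputs side
 * by side: total_pixels = 2 * 1920 * 1080, so pixel_mem =
 * 2 * 1920 * 1080 * 4 bytes (~15.8 MiB), and the bounding box spans
 * 3840x1080, so bb_mem = 3840 * 1080 * 4 bytes. Both must fit within
 * dev_priv->max_primary_mem unless SVGA_CAP_NO_BB_RESTRICTION lifts the
 * bounding-box limit on screen targets.
 */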
/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return an
 * error pointer, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	if (crtc_state) {
		lockdep_assert_held(&crtc->mutex.mutex.base);
	} else {
		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

		if (ret != 0 && ret != -EALREADY)
			return ERR_PTR(ret);

		crtc_state = crtc->state;
	}

	return crtc_state;
}

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_framebuffer *implicit_fb = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_plane_state *plane_state;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->is_implicit)
			continue;

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state || !crtc_state->enable)
			continue;

		/*
		 * Can't move primary planes across crtcs, so this is OK.
		 * It also means we don't need to take the plane mutex.
		 */
		plane_state = du->primary.state;
		if (plane_state->crtc != crtc)
			continue;

		if (!implicit_fb)
			implicit_fb = plane_state->fb;
		else if (implicit_fb != plane_state->fb)
			return -EINVAL;
	}

	return 0;
}

/**
 * vmw_kms_check_topology - Validates topology in drm_atomic_state
 * @dev: DRM device
 * @state: the driver state object
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_topology(struct drm_device *dev,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_rect *rects;
	struct drm_crtc *crtc;
	uint32_t i;
	int ret = 0;

	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
			GFP_KERNEL);
	if (!rects)
		return -ENOMEM;

	drm_for_each_crtc(crtc, dev) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_crtc_state *crtc_state;

		i = drm_crtc_index(crtc);

		crtc_state = vmw_crtc_state_and_lock(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			goto clean;
		}

		if (!crtc_state)
			continue;

		if (crtc_state->enable) {
			rects[i].x1 = du->gui_x;
			rects[i].y1 = du->gui_y;
			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
		} else {
			rects[i].x1 = 0;
			rects[i].y1 = 0;
			rects[i].x2 = 0;
			rects[i].y2 = 0;
		}
	}

	/* Determine change to topology due to new atomic state */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
		struct drm_connector *connector;
		struct drm_connector_state *conn_state;
		struct vmw_connector_state *vmw_conn_state;

		if (!du->pref_active && new_crtc_state->enable) {
			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
			ret = -EINVAL;
			goto clean;
		}

		/*
		 * For vmwgfx each crtc has only one connector attached and it
		 * is not changed so we don't really need to check the
		 * crtc->connector_mask and iterate over it.
		 */
		connector = &du->connector;
		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			goto clean;
		}

		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
		vmw_conn_state->gui_x = du->gui_x;
		vmw_conn_state->gui_y = du->gui_y;
	}

	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
					   rects);

clean:
	kfree(rects);
	return ret;
}
/**
 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
 *
 * @dev: DRM device
 * @state: the driver state object
 *
 * This is a simple wrapper around drm_atomic_helper_check() that allows
 * us to assign a value to mode->crtc_clock so that
 * drm_calc_timestamping_constants() won't throw an error message.
 *
 * Returns:
 * Zero for success or -errno
 */
static int
vmw_kms_atomic_check_modeset(struct drm_device *dev,
			     struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	bool need_modeset = false;
	int i, ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vmw_kms_check_implicit(dev, state);
	if (ret) {
		VMW_DEBUG_KMS("Invalid implicit state\n");
		return ret;
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			need_modeset = true;
	}

	if (need_modeset)
		return vmw_kms_check_topology(dev, state);

	return ret;
}

static const struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
	.atomic_check = vmw_kms_atomic_check_modeset,
	.atomic_commit = drm_atomic_helper_commit,
};

static int vmw_kms_generic_present(struct vmw_private *dev_priv,
				   struct drm_file *file_priv,
				   struct vmw_framebuffer *vfb,
				   struct vmw_surface *surface,
				   uint32_t sid,
				   int32_t destX, int32_t destY,
				   struct drm_vmw_rect *clips,
				   uint32_t num_clips)
{
	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
					    &surface->res, destX, destY,
					    num_clips, 1, NULL, NULL);
}

int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_cmd_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}
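/*
 * Display unit probing order for vmw_kms_init() below: screen targets
 * (STDU) are tried first, then screen objects (SOU), with the legacy
 * display unit (LDU) as the final fallback. active_display_unit records
 * which one won and steers the dispatch in the present, readback and
 * dirty handling above.
 */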
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	int ret;

	switch (dev_priv->active_display_unit) {
	case vmw_du_screen_target:
		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
						 &surface->res, destX, destY,
						 num_clips, 1, NULL, NULL);
		break;
	case vmw_du_screen_object:
		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
					      sid, destX, destY, clips,
					      num_clips);
		break;
	default:
		WARN_ONCE(true,
			  "Present called with invalid display system.\n");
		ret = -ENOSYS;
		break;
	}
	if (ret)
		return ret;

	vmw_cmd_flush(dev_priv, false);

	return 0;
}

static void
vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
{
	if (dev_priv->hotplug_mode_update_property)
		return;

	dev_priv->hotplug_mode_update_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "hotplug_mode_update", 0, 1);
}

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	static const char *display_unit_names[] = {
		"Invalid",
		"Legacy",
		"Screen Object",
		"Screen Target",
		"Invalid (max)"
	};

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	dev->mode_config.max_width = dev_priv->texture_max_width;
	dev->mode_config.max_height = dev_priv->texture_max_height;
	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
	dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;

	drm_mode_create_suggested_offset_properties(dev);
	vmw_kms_create_hotplug_mode_update_property(dev_priv);

	ret = vmw_kms_stdu_init_display(dev_priv);
	if (ret) {
		ret = vmw_kms_sou_init_display(dev_priv);
		if (ret) /* Fallback */
			ret = vmw_kms_ldu_init_display(dev_priv);
	}
	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
	drm_info(&dev_priv->drm, "%s display unit initialized\n",
		 display_unit_names[dev_priv->active_display_unit]);

	return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	int ret = 0;

	/*
	 * The docs say we should take the lock before calling this function,
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(&dev_priv->drm);
	if (dev_priv->active_display_unit == vmw_du_legacy)
		ret = vmw_kms_ldu_close_display(dev_priv);

	return ret;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}

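/**
 * vmw_kms_validate_mode_vram - check that a mode fits in display memory
 *
 * @dev_priv: Pointer to a device private struct.
 * @pitch: Pitch in bytes of the mode's framebuffer.
 * @height: Height of the mode in lines.
 *
 * The budget is max_primary_mem for screen targets and the VRAM size for
 * all other display units.
 *
 * Returns: true if pitch * height fits within the budget.
 */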
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64)
		((dev_priv->active_display_unit == vmw_du_screen_target) ?
		 dev_priv->max_primary_mem : dev_priv->vram_size);
}

/**
 * vmw_du_update_layout - Update the display unit with topology from resolution
 * plugin and generate DRM uevent
 * @dev_priv: device private
 * @num_rects: number of drm_rect in rects
 * @rects: topology to update
 */
static int vmw_du_update_layout(struct vmw_private *dev_priv,
				unsigned int num_rects, struct drm_rect *rects)
{
	struct drm_device *dev = &dev_priv->drm;
	struct vmw_display_unit *du;
	struct drm_connector *con;
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc *crtc;
	int ret;

	/* Currently, gui_x/y is protected by the crtc mutex. */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);
retry:
	drm_for_each_crtc(crtc, dev) {
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret < 0) {
			if (ret == -EDEADLK) {
				drm_modeset_backoff(&ctx);
				goto retry;
			}
			goto out_fini;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(con, &conn_iter) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			du->pref_width = drm_rect_width(&rects[du->unit]);
			du->pref_height = drm_rect_height(&rects[du->unit]);
			du->pref_active = true;
			du->gui_x = rects[du->unit].x1;
			du->gui_y = rects[du->unit].y1;
		} else {
			du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
			du->pref_active = false;
			du->gui_x = 0;
			du->gui_y = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num_rects > du->unit) {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   du->gui_x);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   du->gui_y);
		} else {
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_x_property,
			   0);
			drm_object_property_set_value
			  (&con->base, dev->mode_config.suggested_y_property,
			   0);
		}
		con->status = vmw_du_connector_detect(con, true);
	}
out_fini:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	drm_sysfs_hotplug_event(dev);

	return 0;
}

int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			  u16 *r, u16 *g, u16 *b,
			  uint32_t size,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int i;

	for (i = 0; i < size; i++) {
		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
			  r[i], g[i], b[i]);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
	}

	return 0;
}

int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
	return 0;
}

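/**
 * vmw_du_connector_detect - report the connection status of a connector
 *
 * @connector: The connector to probe.
 * @force: Ignored; the status is derived from register state.
 *
 * A display unit is reported as connected when its unit index is below
 * the host's SVGA_REG_NUM_DISPLAYS count and the unit is marked
 * preferred-active by the current layout.
 */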
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	uint32_t num_displays;
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);

	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);

	return ((du->unit < num_displays && du->pref_active) ?
		connector_status_connected : connector_status_disconnected);
}

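/*
 * Table of built-in display modes that vmw_du_connector_fill_modes()
 * below filters against the device and VRAM limits. All entries are
 * 60 Hz timings except 1152x864@75Hz; a zeroed sentinel entry
 * terminates the list.
 */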
static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x720@60Hz */
	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
		   1472, 1664, 0, 720, 723, 728, 748, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1080@60Hz */
	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1440@60Hz */
	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2880x1800@60Hz */
	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 3840x2160@60Hz */
	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 3840x2400@60Hz */
	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};

/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	mode->hsync_start = mode->hdisplay + 50;
	mode->hsync_end = mode->hsync_start + 50;
	mode->htotal = mode->hsync_end + 50;

	mode->vsync_start = mode->vdisplay + 50;
	mode->vsync_end = mode->vsync_start + 50;
	mode->vtotal = mode->vsync_end + 50;

	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
}

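/*
 * Illustrative arithmetic for the clock formula above: the three
 * 50-pixel guard bands give a 640x480 mode htotal = 790 and
 * vtotal = 630, so clock = 790 * 630 / 100 * 6 = 29862 kHz, which
 * works out to 29862000 / (790 * 630) = 60.0 Hz vrefresh.
 */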
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;
	u32 assumed_bpp = 4;

	if (dev_priv->assume_16bpp)
		assumed_bpp = 2;

	max_width = min(max_width, dev_priv->texture_max_width);
	max_height = min(max_height, dev_priv->texture_max_height);

	/*
	 * For STDU, the SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers
	 * impose an extra limit on the mode size.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target) {
		max_width = min(max_width, dev_priv->stdu_max_width);
		max_height = min(max_height, dev_priv->stdu_max_height);
	}

	/* Add preferred mode */
	mode = drm_mode_duplicate(dev, &prefmode);
	if (!mode)
		return 0;
	mode->hdisplay = du->pref_width;
	mode->vdisplay = du->pref_height;
	vmw_guess_mode_timing(mode);
	drm_mode_set_name(mode);

	if (vmw_kms_validate_mode_vram(dev_priv,
				       mode->hdisplay * assumed_bpp,
				       mode->vdisplay)) {
		drm_mode_probed_add(connector, mode);
	} else {
		drm_mode_destroy(dev, mode);
		mode = NULL;
	}

	if (du->pref_mode) {
		list_del_init(&du->pref_mode->head);
		drm_mode_destroy(dev, du->pref_mode);
	}

	/* mode may be NULL here; this is intended. */
	du->pref_mode = mode;

	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv,
						bmode->hdisplay * assumed_bpp,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;

		drm_mode_probed_add(connector, mode);
	}

	drm_connector_list_update(connector);
	/* Move the preferred mode first to help apps pick the right mode. */
	drm_mode_sort(&connector->modes);

	return 1;
}

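/*
 * Illustrative example for the ioctl below: a side-by-side dual-head
 * layout arrives as the drm_vmw_rect entries {x=0, y=0, w=1280, h=800}
 * and {x=1280, y=0, w=1280, h=800}, which the handler converts in place
 * to the drm_rects [0, 0, 1280, 800] and [1280, 0, 2560, 800] before
 * validating them against the device limits.
 */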
/**
 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Update preferred topology of display unit as per ioctl request. The topology
 * is expressed as an array of drm_vmw_rect, e.g.:
 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
 *
 * NOTE:
 * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0.
 * Besides the device limit on topology, x + w and y + h (lower right) cannot
 * be greater than INT_MAX. A topology beyond these limits will return an
 * error.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	struct drm_rect *drm_rects;
	unsigned rects_size;
	int ret, i;

	if (!arg->num_outputs) {
		struct drm_rect def_rect = {0, 0,
					    VMWGFX_MIN_INITIAL_WIDTH,
					    VMWGFX_MIN_INITIAL_HEIGHT};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		return 0;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects))
		return -ENOMEM;

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	drm_rects = (struct drm_rect *)rects;

	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
	for (i = 0; i < arg->num_outputs; i++) {
		struct drm_vmw_rect curr_rect;

		/*
		 * Verify the user-space rects for overflow, as the kernel
		 * uses drm_rect.
		 */
		if ((rects[i].x + rects[i].w > INT_MAX) ||
		    (rects[i].y + rects[i].h > INT_MAX)) {
			ret = -ERANGE;
			goto out_free;
		}

		curr_rect = rects[i];
		drm_rects[i].x1 = curr_rect.x;
		drm_rects[i].y1 = curr_rect.y;
		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
		drm_rects[i].y2 = curr_rect.y + curr_rect.h;

		VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
			      drm_rects[i].x1, drm_rects[i].y1,
			      drm_rects[i].x2, drm_rects[i].y2);

		/*
		 * Currently this check limits the topology to within
		 * mode_config->max (which actually is the max texture size
		 * supported by the virtual device). This limit is here to
		 * address window managers that create a big framebuffer for
		 * the whole topology.
		 */
		if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
		    drm_rects[i].x2 > mode_config->max_width ||
		    drm_rects[i].y2 > mode_config->max_height) {
			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
				      drm_rects[i].x1, drm_rects[i].y1,
				      drm_rects[i].x2, drm_rects[i].y2);
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);

	if (ret == 0)
		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);

out_free:
	kfree(rects);
	return ret;
}

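/*
 * Usage note: callers of vmw_kms_helper_dirty() fill in a struct
 * vmw_kms_dirty with a clip() callback, a fifo_commit() callback and,
 * optionally, a fifo_reserve_size for a per-unit command reservation;
 * the helper then invokes clip() for every cliprect that intersects a
 * display unit, and fifo_commit() once per unit.
 */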
/**
 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
 * on a set of cliprects and a set of display units.
 *
 * @dev_priv: Pointer to a device private structure.
 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
 * Cliprects are given in framebuffer coordinates.
 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
 * be NULL. Cliprects are given in source coordinates.
 * @dest_x: X coordinate offset for the crtc / destination clip rects.
 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
 * @num_clips: Number of cliprects in the @clips or @vclips array.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
 */
int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
			 struct vmw_framebuffer *framebuffer,
			 const struct drm_clip_rect *clips,
			 const struct drm_vmw_rect *vclips,
			 s32 dest_x, s32 dest_y,
			 int num_clips,
			 int increment,
			 struct vmw_kms_dirty *dirty)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	u32 num_units = 0;
	u32 i, k;

	dirty->dev_priv = dev_priv;

	/* If a crtc is passed, no need to iterate over other display units */
	if (dirty->crtc) {
		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
	} else {
		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
				    head) {
			struct drm_plane *plane = crtc->primary;

			if (plane->state->fb == &framebuffer->base)
				units[num_units++] = vmw_crtc_to_du(crtc);
		}
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		s32 crtc_x = unit->crtc.x;
		s32 crtc_y = unit->crtc.y;
		s32 crtc_width = unit->crtc.mode.hdisplay;
		s32 crtc_height = unit->crtc.mode.vdisplay;
		const struct drm_clip_rect *clips_ptr = clips;
		const struct drm_vmw_rect *vclips_ptr = vclips;

		dirty->unit = unit;
		if (dirty->fifo_reserve_size > 0) {
			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
						     dirty->fifo_reserve_size);
			if (!dirty->cmd)
				return -ENOMEM;

			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
		}
		dirty->num_hits = 0;
		for (i = 0; i < num_clips; i++, clips_ptr += increment,
		     vclips_ptr += increment) {
			s32 clip_left;
			s32 clip_top;

			/*
			 * Select the clip array type. Note that the integer
			 * type in @clips is unsigned short, whereas in
			 * @vclips it's 32-bit.
			 */
			if (clips) {
				dirty->fb_x = (s32) clips_ptr->x1;
				dirty->fb_y = (s32) clips_ptr->y1;
				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
					crtc_x;
				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
					crtc_y;
			} else {
				dirty->fb_x = vclips_ptr->x;
				dirty->fb_y = vclips_ptr->y;
				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
					dest_x - crtc_x;
				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
					dest_y - crtc_y;
			}

			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;

			/* Skip this clip if it's outside the crtc region */
			if (dirty->unit_x1 >= crtc_width ||
			    dirty->unit_y1 >= crtc_height ||
			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
				continue;

			/* Clip right and bottom to crtc limits */
			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
					       crtc_width);
			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
					       crtc_height);

			/* Clip left and top to crtc limits */
			clip_left = min_t(s32, dirty->unit_x1, 0);
			clip_top = min_t(s32, dirty->unit_y1, 0);
			dirty->unit_x1 -= clip_left;
			dirty->unit_y1 -= clip_top;
			dirty->fb_x -= clip_left;
			dirty->fb_y -= clip_top;

			dirty->clip(dirty);
		}

		dirty->fifo_commit(dirty);
	}

	return 0;
}

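/*
 * Coordinate-translation example for the helper above (illustrative):
 * with dest_x = 100, crtc_x = 0 and a vclip {x = 0, y = 0, w = 50,
 * h = 50}, the unit rect becomes [100, 0] - [150, 50] while fb_x/fb_y
 * stay 0. A unit rect starting left of or above the crtc is instead
 * clamped to 0 and the framebuffer offset is advanced by the clamped
 * amount, so source and destination stay in sync.
 */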
/**
 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
 * cleanup and fencing
 * @dev_priv: Pointer to the device-private struct
 * @file_priv: Pointer identifying the client when user-space fencing is used
 * @ctx: Pointer to the validation context
 * @out_fence: If non-NULL, returned refcounted fence-pointer
 * @user_fence_rep: If non-NULL, pointer to user-space address area
 * in which to copy user-space fence info
 */
void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
				      struct drm_file *file_priv,
				      struct vmw_validation_context *ctx,
				      struct vmw_fence_obj **out_fence,
				      struct drm_vmw_fence_rep __user *
				      user_fence_rep)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle = 0;
	int ret = 0;

	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
	    out_fence)
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
						 file_priv ? &handle : NULL);
	vmw_validation_done(ctx, fence);
	if (file_priv)
		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
					    ret, user_fence_rep, fence,
					    handle, -1);
	if (out_fence)
		*out_fence = fence;
	else
		vmw_fence_obj_unreference(&fence);
}

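/*
 * Note on the batching below: vmw_kms_update_proxy() reserves room for
 * one SVGA_3D_CMD_UPDATE_GB_IMAGE command per clip rect up front, fills
 * in one command per rect, and commits them all in a single
 * vmw_cmd_commit() call.
 */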
/**
 * vmw_kms_update_proxy - Helper function to update a proxy surface from
 * its backing MOB.
 *
 * @res: Pointer to the surface resource
 * @clips: Clip rects in framebuffer (surface) space.
 * @num_clips: Number of clips in @clips.
 * @increment: Integer with which to increment the clip counter when looping.
 * Used to skip a predetermined number of clip rects.
 *
 * This function makes sure the proxy surface is updated from its backing MOB
 * using the region given by @clips. The surface resource @res and its backing
 * MOB need to be reserved and validated on call.
 */
int vmw_kms_update_proxy(struct vmw_resource *res,
			 const struct drm_clip_rect *clips,
			 unsigned num_clips,
			 int increment)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;
	SVGA3dBox *box;
	size_t copy_size = 0;
	int i;

	if (!clips)
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
	if (!cmd)
		return -ENOMEM;

	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
		box = &cmd->body.box;

		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.image.sid = res->id;
		cmd->body.image.face = 0;
		cmd->body.image.mipmap = 0;

		if (clips->x1 > size->width || clips->x2 > size->width ||
		    clips->y1 > size->height || clips->y2 > size->height) {
			DRM_ERROR("Invalid clips outside of framebuffer.\n");
			return -EINVAL;
		}

		box->x = clips->x1;
		box->y = clips->y1;
		box->z = 0;
		box->w = clips->x2 - clips->x1;
		box->h = clips->y2 - clips->y1;
		box->d = 1;

		copy_size += sizeof(*cmd);
	}

	vmw_cmd_commit(dev_priv, copy_size);

	return 0;
}

/**
 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
 * property.
 *
 * @dev_priv: Pointer to a device private struct.
 *
 * Sets up the implicit placement property unless it's already set up.
 */
void
vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
	if (dev_priv->implicit_placement_property)
		return;

	dev_priv->implicit_placement_property =
		drm_property_create_range(&dev_priv->drm,
					  DRM_MODE_PROP_IMMUTABLE,
					  "implicit_placement", 0, 1);
}

/**
 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 */
int vmw_kms_suspend(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(dev_priv->suspend_state)) {
		int ret = PTR_ERR(dev_priv->suspend_state);

		DRM_ERROR("Failed kms suspend: %d\n", ret);
		dev_priv->suspend_state = NULL;

		return ret;
	}

	return 0;
}

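/*
 * Sketch of the expected pairing (illustrative; the actual call sites
 * live in the driver's PM code): the freeze path calls vmw_kms_suspend(),
 * which stashes the atomic state in dev_priv->suspend_state, and the
 * matching thaw/restore path calls vmw_kms_resume() exactly once to
 * replay and clear that state.
 */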
/**
 * vmw_kms_resume - Re-enable modesetting and restore state
 *
 * @dev: Pointer to the drm device
 * Return: 0 on success. Negative error code on failure.
 *
 * State is resumed from a previous vmw_kms_suspend(). It's illegal
 * to call this function without a previous vmw_kms_suspend().
 */
int vmw_kms_resume(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	if (WARN_ON(!dev_priv->suspend_state))
		return 0;

	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
	dev_priv->suspend_state = NULL;

	return ret;
}

/**
 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
 *
 * @dev: Pointer to the drm device
 */
void vmw_kms_lost_device(struct drm_device *dev)
{
	drm_atomic_helper_shutdown(dev);
}

/**
 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
 * @update: The closure structure.
 *
 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
 * update on display unit.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
{
	struct drm_plane_state *state = update->plane->state;
	struct drm_plane_state *old_state = update->old_state;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	struct drm_rect bb;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	uint32_t reserved_size = 0;
	uint32_t submit_size = 0;
	uint32_t curr_size = 0;
	uint32_t num_hits = 0;
	void *cmd_start;
	char *cmd_next;
	int ret;

	/*
	 * Iterate in advance to check whether a plane update is really
	 * needed, and to find the number of clips that actually fall within
	 * the plane src, for the fifo allocation.
	 */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return 0;

	if (update->vfb->bo) {
		struct vmw_framebuffer_bo *vfbbo =
			container_of(update->vfb, typeof(*vfbbo), base);

		/*
		 * For screen targets we want a mappable bo; for everything
		 * else we want an accelerated, i.e. host-backed (VRAM or
		 * GMR), bo. If the display unit is not a screen target,
		 * MOBs shouldn't be available.
		 */
		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
			vmw_bo_placement_set(vfbbo->buffer,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
		} else {
			WARN_ON(update->dev_priv->has_mob);
			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
		}
		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
	} else {
		struct vmw_framebuffer_surface *vfbs =
			container_of(update->vfb, typeof(*vfbs), base);

		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
						  0, VMW_RES_DIRTY_NONE, NULL,
						  NULL);
	}

	if (ret)
		return ret;

	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
	if (ret)
		goto out_unref;

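	/*
	 * Everything below emits into a single fifo reservation: the
	 * optional post_prepare and pre_clip headers first, then one clip
	 * command per damage rect, and finally a post_clip footer sized
	 * against the bounding box accumulated in bb.
	 */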
	reserved_size = update->calc_fifo_size(update, num_hits);
	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
	if (!cmd_start) {
		ret = -ENOMEM;
		goto out_revert;
	}

	cmd_next = cmd_start;

	if (update->post_prepare) {
		curr_size = update->post_prepare(update, cmd_next);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	if (update->pre_clip) {
		curr_size = update->pre_clip(update, cmd_next, num_hits);
		cmd_next += curr_size;
		submit_size += curr_size;
	}

	bb.x1 = INT_MAX;
	bb.y1 = INT_MAX;
	bb.x2 = INT_MIN;
	bb.y2 = INT_MIN;

	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		uint32_t fb_x = clip.x1;
		uint32_t fb_y = clip.y1;

		vmw_du_translate_to_crtc(state, &clip);
		if (update->clip) {
			curr_size = update->clip(update, cmd_next, &clip, fb_x,
						 fb_y);
			cmd_next += curr_size;
			submit_size += curr_size;
		}
		bb.x1 = min_t(int, bb.x1, clip.x1);
		bb.y1 = min_t(int, bb.y1, clip.y1);
		bb.x2 = max_t(int, bb.x2, clip.x2);
		bb.y2 = max_t(int, bb.y2, clip.y2);
	}

	curr_size = update->post_clip(update, cmd_next, &bb);
	submit_size += curr_size;

	if (reserved_size < submit_size)
		submit_size = 0;

	vmw_cmd_commit(update->dev_priv, submit_size);

	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
					 update->out_fence, NULL);
	return ret;

out_revert:
	vmw_validation_revert(&val_ctx);

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;
}