1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 #include <linux/module.h> 28 #include <linux/console.h> 29 #include <linux/dma-mapping.h> 30 31 #include <drm/drmP.h> 32 #include "vmwgfx_drv.h" 33 #include "vmwgfx_binding.h" 34 #include "ttm_object.h" 35 #include <drm/ttm/ttm_placement.h> 36 #include <drm/ttm/ttm_bo_driver.h> 37 #include <drm/ttm/ttm_module.h> 38 39 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 40 #define VMWGFX_CHIP_SVGAII 0 41 #define VMW_FB_RESERVATION 0 42 43 #define VMW_MIN_INITIAL_WIDTH 800 44 #define VMW_MIN_INITIAL_HEIGHT 600 45 46 #ifndef VMWGFX_GIT_VERSION 47 #define VMWGFX_GIT_VERSION "Unknown" 48 #endif 49 50 #define VMWGFX_REPO "In Tree" 51 52 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) 53 54 55 /** 56 * Fully encoded drm commands. Might move to vmw_drm.h 57 */ 58 59 #define DRM_IOCTL_VMW_GET_PARAM \ 60 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 61 struct drm_vmw_getparam_arg) 62 #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 63 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 64 union drm_vmw_alloc_dmabuf_arg) 65 #define DRM_IOCTL_VMW_UNREF_DMABUF \ 66 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 67 struct drm_vmw_unref_dmabuf_arg) 68 #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 69 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 70 struct drm_vmw_cursor_bypass_arg) 71 72 #define DRM_IOCTL_VMW_CONTROL_STREAM \ 73 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 74 struct drm_vmw_control_stream_arg) 75 #define DRM_IOCTL_VMW_CLAIM_STREAM \ 76 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 77 struct drm_vmw_stream_arg) 78 #define DRM_IOCTL_VMW_UNREF_STREAM \ 79 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 80 struct drm_vmw_stream_arg) 81 82 #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 83 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 84 struct drm_vmw_context_arg) 85 #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 86 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 87 struct drm_vmw_context_arg) 88 #define DRM_IOCTL_VMW_CREATE_SURFACE \ 89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 90 union drm_vmw_surface_create_arg) 91 #define DRM_IOCTL_VMW_UNREF_SURFACE \ 92 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 93 struct drm_vmw_surface_arg) 94 #define DRM_IOCTL_VMW_REF_SURFACE \ 95 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 96 union drm_vmw_surface_reference_arg) 97 #define DRM_IOCTL_VMW_EXECBUF \ 98 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 99 struct drm_vmw_execbuf_arg) 100 #define DRM_IOCTL_VMW_GET_3D_CAP \ 101 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 102 struct drm_vmw_get_3d_cap_arg) 103 #define DRM_IOCTL_VMW_FENCE_WAIT \ 104 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 105 struct drm_vmw_fence_wait_arg) 106 #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 107 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 108 struct drm_vmw_fence_signaled_arg) 109 #define DRM_IOCTL_VMW_FENCE_UNREF \ 110 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 111 struct drm_vmw_fence_arg) 112 #define DRM_IOCTL_VMW_FENCE_EVENT \ 113 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 114 struct drm_vmw_fence_event_arg) 115 #define DRM_IOCTL_VMW_PRESENT \ 116 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 117 struct drm_vmw_present_arg) 118 #define DRM_IOCTL_VMW_PRESENT_READBACK \ 119 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 120 struct drm_vmw_present_readback_arg) 121 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 122 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 123 struct drm_vmw_update_layout_arg) 124 #define DRM_IOCTL_VMW_CREATE_SHADER \ 125 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 126 struct drm_vmw_shader_create_arg) 127 #define DRM_IOCTL_VMW_UNREF_SHADER \ 128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 129 struct drm_vmw_shader_arg) 130 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 131 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 132 union drm_vmw_gb_surface_create_arg) 133 #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 134 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 135 union drm_vmw_gb_surface_reference_arg) 136 #define DRM_IOCTL_VMW_SYNCCPU \ 137 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 138 struct drm_vmw_synccpu_arg) 139 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 140 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 141 struct drm_vmw_context_arg) 142 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ 143 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ 144 union drm_vmw_gb_surface_create_ext_arg) 145 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ 146 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ 147 union drm_vmw_gb_surface_reference_ext_arg) 148 149 /** 150 * The core DRM version of this macro doesn't account for 151 * DRM_COMMAND_BASE. 152 */ 153 154 #define VMW_IOCTL_DEF(ioctl, func, flags) \ 155 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} 156 157 /** 158 * Ioctl definitions. 159 */ 160 161 static const struct drm_ioctl_desc vmw_ioctls[] = { 162 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 163 DRM_AUTH | DRM_RENDER_ALLOW), 164 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, 165 DRM_AUTH | DRM_RENDER_ALLOW), 166 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, 167 DRM_RENDER_ALLOW), 168 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, 169 vmw_kms_cursor_bypass_ioctl, 170 DRM_MASTER), 171 172 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 173 DRM_MASTER), 174 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 175 DRM_MASTER), 176 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 177 DRM_MASTER), 178 179 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 180 DRM_AUTH | DRM_RENDER_ALLOW), 181 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 182 DRM_RENDER_ALLOW), 183 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 184 DRM_AUTH | DRM_RENDER_ALLOW), 185 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 186 DRM_RENDER_ALLOW), 187 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 188 DRM_AUTH | DRM_RENDER_ALLOW), 189 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH | 190 DRM_RENDER_ALLOW), 191 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 192 DRM_RENDER_ALLOW), 193 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 194 vmw_fence_obj_signaled_ioctl, 195 DRM_RENDER_ALLOW), 196 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 197 DRM_RENDER_ALLOW), 198 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 199 DRM_AUTH | DRM_RENDER_ALLOW), 200 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 201 DRM_AUTH | DRM_RENDER_ALLOW), 202 203 /* these allow direct access to the framebuffers mark as master only */ 204 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, 205 DRM_MASTER | DRM_AUTH), 206 VMW_IOCTL_DEF(VMW_PRESENT_READBACK, 207 vmw_present_readback_ioctl, 208 DRM_MASTER | DRM_AUTH), 209 /* 210 * The permissions of the below ioctl are overridden in 211 * vmw_generic_ioctl(). We require either 212 * DRM_MASTER or capable(CAP_SYS_ADMIN). 213 */ 214 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, 215 vmw_kms_update_layout_ioctl, 216 DRM_RENDER_ALLOW), 217 VMW_IOCTL_DEF(VMW_CREATE_SHADER, 218 vmw_shader_define_ioctl, 219 DRM_AUTH | DRM_RENDER_ALLOW), 220 VMW_IOCTL_DEF(VMW_UNREF_SHADER, 221 vmw_shader_destroy_ioctl, 222 DRM_RENDER_ALLOW), 223 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, 224 vmw_gb_surface_define_ioctl, 225 DRM_AUTH | DRM_RENDER_ALLOW), 226 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, 227 vmw_gb_surface_reference_ioctl, 228 DRM_AUTH | DRM_RENDER_ALLOW), 229 VMW_IOCTL_DEF(VMW_SYNCCPU, 230 vmw_user_bo_synccpu_ioctl, 231 DRM_RENDER_ALLOW), 232 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, 233 vmw_extended_context_define_ioctl, 234 DRM_AUTH | DRM_RENDER_ALLOW), 235 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT, 236 vmw_gb_surface_define_ext_ioctl, 237 DRM_AUTH | DRM_RENDER_ALLOW), 238 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT, 239 vmw_gb_surface_reference_ext_ioctl, 240 DRM_AUTH | DRM_RENDER_ALLOW), 241 }; 242 243 static const struct pci_device_id vmw_pci_id_list[] = { 244 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, 245 {0, 0, 0} 246 }; 247 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 248 249 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 250 static int vmw_force_iommu; 251 static int vmw_restrict_iommu; 252 static int vmw_force_coherent; 253 static int vmw_restrict_dma_mask; 254 static int vmw_assume_16bpp; 255 256 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 257 static void vmw_master_init(struct vmw_master *); 258 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 259 void *ptr); 260 261 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 262 module_param_named(enable_fbdev, enable_fbdev, int, 0600); 263 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); 264 module_param_named(force_dma_api, vmw_force_iommu, int, 0600); 265 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 266 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 267 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 268 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 269 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 270 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 271 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 272 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 273 274 275 static void vmw_print_capabilities2(uint32_t capabilities2) 276 { 277 DRM_INFO("Capabilities2:\n"); 278 if (capabilities2 & SVGA_CAP2_GROW_OTABLE) 279 DRM_INFO(" Grow oTable.\n"); 280 if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY) 281 DRM_INFO(" IntraSurface copy.\n"); 282 } 283 284 static void vmw_print_capabilities(uint32_t capabilities) 285 { 286 DRM_INFO("Capabilities:\n"); 287 if (capabilities & SVGA_CAP_RECT_COPY) 288 DRM_INFO(" Rect copy.\n"); 289 if (capabilities & SVGA_CAP_CURSOR) 290 DRM_INFO(" Cursor.\n"); 291 if (capabilities & SVGA_CAP_CURSOR_BYPASS) 292 DRM_INFO(" Cursor bypass.\n"); 293 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) 294 DRM_INFO(" Cursor bypass 2.\n"); 295 if (capabilities & SVGA_CAP_8BIT_EMULATION) 296 DRM_INFO(" 8bit emulation.\n"); 297 if (capabilities & SVGA_CAP_ALPHA_CURSOR) 298 DRM_INFO(" Alpha cursor.\n"); 299 if (capabilities & SVGA_CAP_3D) 300 DRM_INFO(" 3D.\n"); 301 if (capabilities & SVGA_CAP_EXTENDED_FIFO) 302 DRM_INFO(" Extended Fifo.\n"); 303 if (capabilities & SVGA_CAP_MULTIMON) 304 DRM_INFO(" Multimon.\n"); 305 if (capabilities & SVGA_CAP_PITCHLOCK) 306 DRM_INFO(" Pitchlock.\n"); 307 if (capabilities & SVGA_CAP_IRQMASK) 308 DRM_INFO(" Irq mask.\n"); 309 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) 310 DRM_INFO(" Display Topology.\n"); 311 if (capabilities & SVGA_CAP_GMR) 312 DRM_INFO(" GMR.\n"); 313 if (capabilities & SVGA_CAP_TRACES) 314 DRM_INFO(" Traces.\n"); 315 if (capabilities & SVGA_CAP_GMR2) 316 DRM_INFO(" GMR2.\n"); 317 if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) 318 DRM_INFO(" Screen Object 2.\n"); 319 if (capabilities & SVGA_CAP_COMMAND_BUFFERS) 320 DRM_INFO(" Command Buffers.\n"); 321 if (capabilities & SVGA_CAP_CMD_BUFFERS_2) 322 DRM_INFO(" Command Buffers 2.\n"); 323 if (capabilities & SVGA_CAP_GBOBJECTS) 324 DRM_INFO(" Guest Backed Resources.\n"); 325 if (capabilities & SVGA_CAP_DX) 326 DRM_INFO(" DX Features.\n"); 327 if (capabilities & SVGA_CAP_HP_CMD_QUEUE) 328 DRM_INFO(" HP Command Queue.\n"); 329 } 330 331 /** 332 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result 333 * 334 * @dev_priv: A device private structure. 335 * 336 * This function creates a small buffer object that holds the query 337 * result for dummy queries emitted as query barriers. 338 * The function will then map the first page and initialize a pending 339 * occlusion query result structure, Finally it will unmap the buffer. 340 * No interruptible waits are done within this function. 341 * 342 * Returns an error if bo creation or initialization fails. 343 */ 344 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 345 { 346 int ret; 347 struct vmw_buffer_object *vbo; 348 struct ttm_bo_kmap_obj map; 349 volatile SVGA3dQueryResult *result; 350 bool dummy; 351 352 /* 353 * Create the vbo as pinned, so that a tryreserve will 354 * immediately succeed. This is because we're the only 355 * user of the bo currently. 356 */ 357 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); 358 if (!vbo) 359 return -ENOMEM; 360 361 ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, 362 &vmw_sys_ne_placement, false, 363 &vmw_bo_bo_free); 364 if (unlikely(ret != 0)) 365 return ret; 366 367 ret = ttm_bo_reserve(&vbo->base, false, true, NULL); 368 BUG_ON(ret != 0); 369 vmw_bo_pin_reserved(vbo, true); 370 371 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); 372 if (likely(ret == 0)) { 373 result = ttm_kmap_obj_virtual(&map, &dummy); 374 result->totalSize = sizeof(*result); 375 result->state = SVGA3D_QUERYSTATE_PENDING; 376 result->result32 = 0xff; 377 ttm_bo_kunmap(&map); 378 } 379 vmw_bo_pin_reserved(vbo, false); 380 ttm_bo_unreserve(&vbo->base); 381 382 if (unlikely(ret != 0)) { 383 DRM_ERROR("Dummy query buffer map failed.\n"); 384 vmw_bo_unreference(&vbo); 385 } else 386 dev_priv->dummy_query_bo = vbo; 387 388 return ret; 389 } 390 391 /** 392 * vmw_request_device_late - Perform late device setup 393 * 394 * @dev_priv: Pointer to device private. 395 * 396 * This function performs setup of otables and enables large command 397 * buffer submission. These tasks are split out to a separate function 398 * because it reverts vmw_release_device_early and is intended to be used 399 * by an error path in the hibernation code. 400 */ 401 static int vmw_request_device_late(struct vmw_private *dev_priv) 402 { 403 int ret; 404 405 if (dev_priv->has_mob) { 406 ret = vmw_otables_setup(dev_priv); 407 if (unlikely(ret != 0)) { 408 DRM_ERROR("Unable to initialize " 409 "guest Memory OBjects.\n"); 410 return ret; 411 } 412 } 413 414 if (dev_priv->cman) { 415 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 416 256*4096, 2*4096); 417 if (ret) { 418 struct vmw_cmdbuf_man *man = dev_priv->cman; 419 420 dev_priv->cman = NULL; 421 vmw_cmdbuf_man_destroy(man); 422 } 423 } 424 425 return 0; 426 } 427 428 static int vmw_request_device(struct vmw_private *dev_priv) 429 { 430 int ret; 431 432 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 433 if (unlikely(ret != 0)) { 434 DRM_ERROR("Unable to initialize FIFO.\n"); 435 return ret; 436 } 437 vmw_fence_fifo_up(dev_priv->fman); 438 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 439 if (IS_ERR(dev_priv->cman)) { 440 dev_priv->cman = NULL; 441 dev_priv->has_dx = false; 442 } 443 444 ret = vmw_request_device_late(dev_priv); 445 if (ret) 446 goto out_no_mob; 447 448 ret = vmw_dummy_query_bo_create(dev_priv); 449 if (unlikely(ret != 0)) 450 goto out_no_query_bo; 451 452 return 0; 453 454 out_no_query_bo: 455 if (dev_priv->cman) 456 vmw_cmdbuf_remove_pool(dev_priv->cman); 457 if (dev_priv->has_mob) { 458 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); 459 vmw_otables_takedown(dev_priv); 460 } 461 if (dev_priv->cman) 462 vmw_cmdbuf_man_destroy(dev_priv->cman); 463 out_no_mob: 464 vmw_fence_fifo_down(dev_priv->fman); 465 vmw_fifo_release(dev_priv, &dev_priv->fifo); 466 return ret; 467 } 468 469 /** 470 * vmw_release_device_early - Early part of fifo takedown. 471 * 472 * @dev_priv: Pointer to device private struct. 473 * 474 * This is the first part of command submission takedown, to be called before 475 * buffer management is taken down. 476 */ 477 static void vmw_release_device_early(struct vmw_private *dev_priv) 478 { 479 /* 480 * Previous destructions should've released 481 * the pinned bo. 482 */ 483 484 BUG_ON(dev_priv->pinned_bo != NULL); 485 486 vmw_bo_unreference(&dev_priv->dummy_query_bo); 487 if (dev_priv->cman) 488 vmw_cmdbuf_remove_pool(dev_priv->cman); 489 490 if (dev_priv->has_mob) { 491 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); 492 vmw_otables_takedown(dev_priv); 493 } 494 } 495 496 /** 497 * vmw_release_device_late - Late part of fifo takedown. 498 * 499 * @dev_priv: Pointer to device private struct. 500 * 501 * This is the last part of the command submission takedown, to be called when 502 * command submission is no longer needed. It may wait on pending fences. 503 */ 504 static void vmw_release_device_late(struct vmw_private *dev_priv) 505 { 506 vmw_fence_fifo_down(dev_priv->fman); 507 if (dev_priv->cman) 508 vmw_cmdbuf_man_destroy(dev_priv->cman); 509 510 vmw_fifo_release(dev_priv, &dev_priv->fifo); 511 } 512 513 /** 514 * Sets the initial_[width|height] fields on the given vmw_private. 515 * 516 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then 517 * clamping the value to fb_max_[width|height] fields and the 518 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 519 * If the values appear to be invalid, set them to 520 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 521 */ 522 static void vmw_get_initial_size(struct vmw_private *dev_priv) 523 { 524 uint32_t width; 525 uint32_t height; 526 527 width = vmw_read(dev_priv, SVGA_REG_WIDTH); 528 height = vmw_read(dev_priv, SVGA_REG_HEIGHT); 529 530 width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); 531 height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); 532 533 if (width > dev_priv->fb_max_width || 534 height > dev_priv->fb_max_height) { 535 536 /* 537 * This is a host error and shouldn't occur. 538 */ 539 540 width = VMW_MIN_INITIAL_WIDTH; 541 height = VMW_MIN_INITIAL_HEIGHT; 542 } 543 544 dev_priv->initial_width = width; 545 dev_priv->initial_height = height; 546 } 547 548 /** 549 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 550 * system. 551 * 552 * @dev_priv: Pointer to a struct vmw_private 553 * 554 * This functions tries to determine what actions need to be taken by the 555 * driver to make system pages visible to the device. 556 * If this function decides that DMA is not possible, it returns -EINVAL. 557 * The driver may then try to disable features of the device that require 558 * DMA. 559 */ 560 static int vmw_dma_select_mode(struct vmw_private *dev_priv) 561 { 562 static const char *names[vmw_dma_map_max] = { 563 [vmw_dma_phys] = "Using physical TTM page addresses.", 564 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 565 [vmw_dma_map_populate] = "Caching DMA mappings.", 566 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 567 568 if (vmw_force_coherent) 569 dev_priv->map_mode = vmw_dma_alloc_coherent; 570 else if (vmw_restrict_iommu) 571 dev_priv->map_mode = vmw_dma_map_bind; 572 else 573 dev_priv->map_mode = vmw_dma_map_populate; 574 575 /* No TTM coherent page pool? FIXME: Ask TTM instead! */ 576 if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && 577 (dev_priv->map_mode == vmw_dma_alloc_coherent)) 578 return -EINVAL; 579 580 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 581 return 0; 582 } 583 584 /** 585 * vmw_dma_masks - set required page- and dma masks 586 * 587 * @dev: Pointer to struct drm-device 588 * 589 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 590 * restriction also for 64-bit systems. 591 */ 592 static int vmw_dma_masks(struct vmw_private *dev_priv) 593 { 594 struct drm_device *dev = dev_priv->dev; 595 int ret = 0; 596 597 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 598 if (dev_priv->map_mode != vmw_dma_phys && 599 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 600 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 601 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 602 } 603 604 return ret; 605 } 606 607 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 608 { 609 struct vmw_private *dev_priv; 610 int ret; 611 uint32_t svga_id; 612 enum vmw_res_type i; 613 bool refuse_dma = false; 614 char host_log[100] = {0}; 615 616 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 617 if (unlikely(!dev_priv)) { 618 DRM_ERROR("Failed allocating a device private struct.\n"); 619 return -ENOMEM; 620 } 621 622 pci_set_master(dev->pdev); 623 624 dev_priv->dev = dev; 625 dev_priv->vmw_chipset = chipset; 626 dev_priv->last_read_seqno = (uint32_t) -100; 627 mutex_init(&dev_priv->cmdbuf_mutex); 628 mutex_init(&dev_priv->release_mutex); 629 mutex_init(&dev_priv->binding_mutex); 630 mutex_init(&dev_priv->global_kms_state_mutex); 631 ttm_lock_init(&dev_priv->reservation_sem); 632 spin_lock_init(&dev_priv->resource_lock); 633 spin_lock_init(&dev_priv->hw_lock); 634 spin_lock_init(&dev_priv->waiter_lock); 635 spin_lock_init(&dev_priv->cap_lock); 636 spin_lock_init(&dev_priv->svga_lock); 637 spin_lock_init(&dev_priv->cursor_lock); 638 639 for (i = vmw_res_context; i < vmw_res_max; ++i) { 640 idr_init(&dev_priv->res_idr[i]); 641 INIT_LIST_HEAD(&dev_priv->res_lru[i]); 642 } 643 644 init_waitqueue_head(&dev_priv->fence_queue); 645 init_waitqueue_head(&dev_priv->fifo_queue); 646 dev_priv->fence_queue_waiters = 0; 647 dev_priv->fifo_queue_waiters = 0; 648 649 dev_priv->used_memory_size = 0; 650 651 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 652 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 653 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 654 655 dev_priv->assume_16bpp = !!vmw_assume_16bpp; 656 657 dev_priv->enable_fb = enable_fbdev; 658 659 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 660 svga_id = vmw_read(dev_priv, SVGA_REG_ID); 661 if (svga_id != SVGA_ID_2) { 662 ret = -ENOSYS; 663 DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); 664 goto out_err0; 665 } 666 667 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 668 669 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) { 670 dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); 671 } 672 673 674 ret = vmw_dma_select_mode(dev_priv); 675 if (unlikely(ret != 0)) { 676 DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); 677 refuse_dma = true; 678 } 679 680 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 681 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 682 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 683 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 684 685 vmw_get_initial_size(dev_priv); 686 687 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 688 dev_priv->max_gmr_ids = 689 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 690 dev_priv->max_gmr_pages = 691 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 692 dev_priv->memory_size = 693 vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 694 dev_priv->memory_size -= dev_priv->vram_size; 695 } else { 696 /* 697 * An arbitrary limit of 512MiB on surface 698 * memory. But all HWV8 hardware supports GMR2. 699 */ 700 dev_priv->memory_size = 512*1024*1024; 701 } 702 dev_priv->max_mob_pages = 0; 703 dev_priv->max_mob_size = 0; 704 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 705 uint64_t mem_size = 706 vmw_read(dev_priv, 707 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 708 709 /* 710 * Workaround for low memory 2D VMs to compensate for the 711 * allocation taken by fbdev 712 */ 713 if (!(dev_priv->capabilities & SVGA_CAP_3D)) 714 mem_size *= 3; 715 716 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 717 dev_priv->prim_bb_mem = 718 vmw_read(dev_priv, 719 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 720 dev_priv->max_mob_size = 721 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 722 dev_priv->stdu_max_width = 723 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 724 dev_priv->stdu_max_height = 725 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 726 727 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 728 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 729 dev_priv->texture_max_width = vmw_read(dev_priv, 730 SVGA_REG_DEV_CAP); 731 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 732 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 733 dev_priv->texture_max_height = vmw_read(dev_priv, 734 SVGA_REG_DEV_CAP); 735 } else { 736 dev_priv->texture_max_width = 8192; 737 dev_priv->texture_max_height = 8192; 738 dev_priv->prim_bb_mem = dev_priv->vram_size; 739 } 740 741 vmw_print_capabilities(dev_priv->capabilities); 742 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) 743 vmw_print_capabilities2(dev_priv->capabilities2); 744 745 ret = vmw_dma_masks(dev_priv); 746 if (unlikely(ret != 0)) 747 goto out_err0; 748 749 dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK, 750 SCATTERLIST_MAX_SEGMENT)); 751 752 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 753 DRM_INFO("Max GMR ids is %u\n", 754 (unsigned)dev_priv->max_gmr_ids); 755 DRM_INFO("Max number of GMR pages is %u\n", 756 (unsigned)dev_priv->max_gmr_pages); 757 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", 758 (unsigned)dev_priv->memory_size / 1024); 759 } 760 DRM_INFO("Maximum display memory size is %u kiB\n", 761 dev_priv->prim_bb_mem / 1024); 762 DRM_INFO("VRAM at 0x%08x size is %u kiB\n", 763 dev_priv->vram_start, dev_priv->vram_size / 1024); 764 DRM_INFO("MMIO at 0x%08x size is %u kiB\n", 765 dev_priv->mmio_start, dev_priv->mmio_size / 1024); 766 767 vmw_master_init(&dev_priv->fbdev_master); 768 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 769 dev_priv->active_master = &dev_priv->fbdev_master; 770 771 dev_priv->mmio_virt = memremap(dev_priv->mmio_start, 772 dev_priv->mmio_size, MEMREMAP_WB); 773 774 if (unlikely(dev_priv->mmio_virt == NULL)) { 775 ret = -ENOMEM; 776 DRM_ERROR("Failed mapping MMIO.\n"); 777 goto out_err0; 778 } 779 780 /* Need mmio memory to check for fifo pitchlock cap. */ 781 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 782 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 783 !vmw_fifo_have_pitchlock(dev_priv)) { 784 ret = -ENOSYS; 785 DRM_ERROR("Hardware has no pitchlock\n"); 786 goto out_err4; 787 } 788 789 dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, 790 &vmw_prime_dmabuf_ops); 791 792 if (unlikely(dev_priv->tdev == NULL)) { 793 DRM_ERROR("Unable to initialize TTM object management.\n"); 794 ret = -ENOMEM; 795 goto out_err4; 796 } 797 798 dev->dev_private = dev_priv; 799 800 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 801 dev_priv->stealth = (ret != 0); 802 if (dev_priv->stealth) { 803 /** 804 * Request at least the mmio PCI resource. 805 */ 806 807 DRM_INFO("It appears like vesafb is loaded. " 808 "Ignore above error if any.\n"); 809 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 810 if (unlikely(ret != 0)) { 811 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 812 goto out_no_device; 813 } 814 } 815 816 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 817 ret = vmw_irq_install(dev, dev->pdev->irq); 818 if (ret != 0) { 819 DRM_ERROR("Failed installing irq: %d\n", ret); 820 goto out_no_irq; 821 } 822 } 823 824 dev_priv->fman = vmw_fence_manager_init(dev_priv); 825 if (unlikely(dev_priv->fman == NULL)) { 826 ret = -ENOMEM; 827 goto out_no_fman; 828 } 829 830 drm_vma_offset_manager_init(&dev_priv->vma_manager, 831 DRM_FILE_PAGE_OFFSET_START, 832 DRM_FILE_PAGE_OFFSET_SIZE); 833 ret = ttm_bo_device_init(&dev_priv->bdev, 834 &vmw_bo_driver, 835 dev->anon_inode->i_mapping, 836 &dev_priv->vma_manager, 837 false); 838 if (unlikely(ret != 0)) { 839 DRM_ERROR("Failed initializing TTM buffer object driver.\n"); 840 goto out_no_bdev; 841 } 842 843 /* 844 * Enable VRAM, but initially don't use it until SVGA is enabled and 845 * unhidden. 846 */ 847 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 848 (dev_priv->vram_size >> PAGE_SHIFT)); 849 if (unlikely(ret != 0)) { 850 DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 851 goto out_no_vram; 852 } 853 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 854 855 dev_priv->has_gmr = true; 856 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 857 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, 858 VMW_PL_GMR) != 0) { 859 DRM_INFO("No GMR memory available. " 860 "Graphics memory resources are very limited.\n"); 861 dev_priv->has_gmr = false; 862 } 863 864 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 865 dev_priv->has_mob = true; 866 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, 867 VMW_PL_MOB) != 0) { 868 DRM_INFO("No MOB memory available. " 869 "3D will be disabled.\n"); 870 dev_priv->has_mob = false; 871 } 872 } 873 874 if (dev_priv->has_mob) { 875 spin_lock(&dev_priv->cap_lock); 876 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); 877 dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); 878 spin_unlock(&dev_priv->cap_lock); 879 } 880 881 vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); 882 ret = vmw_kms_init(dev_priv); 883 if (unlikely(ret != 0)) 884 goto out_no_kms; 885 vmw_overlay_init(dev_priv); 886 887 ret = vmw_request_device(dev_priv); 888 if (ret) 889 goto out_no_fifo; 890 891 if (dev_priv->has_dx) { 892 /* 893 * SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 894 * support 895 */ 896 if ((dev_priv->capabilities2 & SVGA_CAP2_DX2) != 0) { 897 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 898 SVGA3D_DEVCAP_SM41); 899 dev_priv->has_sm4_1 = vmw_read(dev_priv, 900 SVGA_REG_DEV_CAP); 901 } 902 } 903 904 DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); 905 DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC) 906 ? "yes." : "no."); 907 DRM_INFO("SM4_1: %s\n", dev_priv->has_sm4_1 ? "yes." : "no."); 908 909 snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", 910 VMWGFX_REPO, VMWGFX_GIT_VERSION); 911 vmw_host_log(host_log); 912 913 memset(host_log, 0, sizeof(host_log)); 914 snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", 915 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 916 VMWGFX_DRIVER_PATCHLEVEL); 917 vmw_host_log(host_log); 918 919 if (dev_priv->enable_fb) { 920 vmw_fifo_resource_inc(dev_priv); 921 vmw_svga_enable(dev_priv); 922 vmw_fb_init(dev_priv); 923 } 924 925 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 926 register_pm_notifier(&dev_priv->pm_nb); 927 928 return 0; 929 930 out_no_fifo: 931 vmw_overlay_close(dev_priv); 932 vmw_kms_close(dev_priv); 933 out_no_kms: 934 if (dev_priv->has_mob) 935 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 936 if (dev_priv->has_gmr) 937 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 938 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 939 out_no_vram: 940 (void)ttm_bo_device_release(&dev_priv->bdev); 941 out_no_bdev: 942 vmw_fence_manager_takedown(dev_priv->fman); 943 out_no_fman: 944 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 945 vmw_irq_uninstall(dev_priv->dev); 946 out_no_irq: 947 if (dev_priv->stealth) 948 pci_release_region(dev->pdev, 2); 949 else 950 pci_release_regions(dev->pdev); 951 out_no_device: 952 ttm_object_device_release(&dev_priv->tdev); 953 out_err4: 954 memunmap(dev_priv->mmio_virt); 955 out_err0: 956 for (i = vmw_res_context; i < vmw_res_max; ++i) 957 idr_destroy(&dev_priv->res_idr[i]); 958 959 if (dev_priv->ctx.staged_bindings) 960 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 961 kfree(dev_priv); 962 return ret; 963 } 964 965 static void vmw_driver_unload(struct drm_device *dev) 966 { 967 struct vmw_private *dev_priv = vmw_priv(dev); 968 enum vmw_res_type i; 969 970 unregister_pm_notifier(&dev_priv->pm_nb); 971 972 if (dev_priv->ctx.res_ht_initialized) 973 drm_ht_remove(&dev_priv->ctx.res_ht); 974 vfree(dev_priv->ctx.cmd_bounce); 975 if (dev_priv->enable_fb) { 976 vmw_fb_off(dev_priv); 977 vmw_fb_close(dev_priv); 978 vmw_fifo_resource_dec(dev_priv); 979 vmw_svga_disable(dev_priv); 980 } 981 982 vmw_kms_close(dev_priv); 983 vmw_overlay_close(dev_priv); 984 985 if (dev_priv->has_gmr) 986 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 987 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 988 989 vmw_release_device_early(dev_priv); 990 if (dev_priv->has_mob) 991 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 992 (void) ttm_bo_device_release(&dev_priv->bdev); 993 drm_vma_offset_manager_destroy(&dev_priv->vma_manager); 994 vmw_release_device_late(dev_priv); 995 vmw_fence_manager_takedown(dev_priv->fman); 996 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 997 vmw_irq_uninstall(dev_priv->dev); 998 if (dev_priv->stealth) 999 pci_release_region(dev->pdev, 2); 1000 else 1001 pci_release_regions(dev->pdev); 1002 1003 ttm_object_device_release(&dev_priv->tdev); 1004 memunmap(dev_priv->mmio_virt); 1005 if (dev_priv->ctx.staged_bindings) 1006 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1007 1008 for (i = vmw_res_context; i < vmw_res_max; ++i) 1009 idr_destroy(&dev_priv->res_idr[i]); 1010 1011 kfree(dev_priv); 1012 } 1013 1014 static void vmw_postclose(struct drm_device *dev, 1015 struct drm_file *file_priv) 1016 { 1017 struct vmw_fpriv *vmw_fp; 1018 1019 vmw_fp = vmw_fpriv(file_priv); 1020 1021 if (vmw_fp->locked_master) { 1022 struct vmw_master *vmaster = 1023 vmw_master(vmw_fp->locked_master); 1024 1025 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 1026 ttm_vt_unlock(&vmaster->lock); 1027 drm_master_put(&vmw_fp->locked_master); 1028 } 1029 1030 ttm_object_file_release(&vmw_fp->tfile); 1031 kfree(vmw_fp); 1032 } 1033 1034 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1035 { 1036 struct vmw_private *dev_priv = vmw_priv(dev); 1037 struct vmw_fpriv *vmw_fp; 1038 int ret = -ENOMEM; 1039 1040 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1041 if (unlikely(!vmw_fp)) 1042 return ret; 1043 1044 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1045 if (unlikely(vmw_fp->tfile == NULL)) 1046 goto out_no_tfile; 1047 1048 file_priv->driver_priv = vmw_fp; 1049 1050 return 0; 1051 1052 out_no_tfile: 1053 kfree(vmw_fp); 1054 return ret; 1055 } 1056 1057 static struct vmw_master *vmw_master_check(struct drm_device *dev, 1058 struct drm_file *file_priv, 1059 unsigned int flags) 1060 { 1061 int ret; 1062 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1063 struct vmw_master *vmaster; 1064 1065 if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) 1066 return NULL; 1067 1068 ret = mutex_lock_interruptible(&dev->master_mutex); 1069 if (unlikely(ret != 0)) 1070 return ERR_PTR(-ERESTARTSYS); 1071 1072 if (drm_is_current_master(file_priv)) { 1073 mutex_unlock(&dev->master_mutex); 1074 return NULL; 1075 } 1076 1077 /* 1078 * Check if we were previously master, but now dropped. In that 1079 * case, allow at least render node functionality. 1080 */ 1081 if (vmw_fp->locked_master) { 1082 mutex_unlock(&dev->master_mutex); 1083 1084 if (flags & DRM_RENDER_ALLOW) 1085 return NULL; 1086 1087 DRM_ERROR("Dropped master trying to access ioctl that " 1088 "requires authentication.\n"); 1089 return ERR_PTR(-EACCES); 1090 } 1091 mutex_unlock(&dev->master_mutex); 1092 1093 /* 1094 * Take the TTM lock. Possibly sleep waiting for the authenticating 1095 * master to become master again, or for a SIGTERM if the 1096 * authenticating master exits. 1097 */ 1098 vmaster = vmw_master(file_priv->master); 1099 ret = ttm_read_lock(&vmaster->lock, true); 1100 if (unlikely(ret != 0)) 1101 vmaster = ERR_PTR(ret); 1102 1103 return vmaster; 1104 } 1105 1106 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1107 unsigned long arg, 1108 long (*ioctl_func)(struct file *, unsigned int, 1109 unsigned long)) 1110 { 1111 struct drm_file *file_priv = filp->private_data; 1112 struct drm_device *dev = file_priv->minor->dev; 1113 unsigned int nr = DRM_IOCTL_NR(cmd); 1114 struct vmw_master *vmaster; 1115 unsigned int flags; 1116 long ret; 1117 1118 /* 1119 * Do extra checking on driver private ioctls. 1120 */ 1121 1122 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1123 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1124 const struct drm_ioctl_desc *ioctl = 1125 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1126 1127 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1128 return ioctl_func(filp, cmd, arg); 1129 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1130 if (!drm_is_current_master(file_priv) && 1131 !capable(CAP_SYS_ADMIN)) 1132 return -EACCES; 1133 } 1134 1135 if (unlikely(ioctl->cmd != cmd)) 1136 goto out_io_encoding; 1137 1138 flags = ioctl->flags; 1139 } else if (!drm_ioctl_flags(nr, &flags)) 1140 return -EINVAL; 1141 1142 vmaster = vmw_master_check(dev, file_priv, flags); 1143 if (IS_ERR(vmaster)) { 1144 ret = PTR_ERR(vmaster); 1145 1146 if (ret != -ERESTARTSYS) 1147 DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", 1148 nr, ret); 1149 return ret; 1150 } 1151 1152 ret = ioctl_func(filp, cmd, arg); 1153 if (vmaster) 1154 ttm_read_unlock(&vmaster->lock); 1155 1156 return ret; 1157 1158 out_io_encoding: 1159 DRM_ERROR("Invalid command format, ioctl %d\n", 1160 nr - DRM_COMMAND_BASE); 1161 1162 return -EINVAL; 1163 } 1164 1165 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1166 unsigned long arg) 1167 { 1168 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1169 } 1170 1171 #ifdef CONFIG_COMPAT 1172 static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1173 unsigned long arg) 1174 { 1175 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 1176 } 1177 #endif 1178 1179 static void vmw_master_init(struct vmw_master *vmaster) 1180 { 1181 ttm_lock_init(&vmaster->lock); 1182 } 1183 1184 static int vmw_master_create(struct drm_device *dev, 1185 struct drm_master *master) 1186 { 1187 struct vmw_master *vmaster; 1188 1189 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); 1190 if (unlikely(!vmaster)) 1191 return -ENOMEM; 1192 1193 vmw_master_init(vmaster); 1194 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 1195 master->driver_priv = vmaster; 1196 1197 return 0; 1198 } 1199 1200 static void vmw_master_destroy(struct drm_device *dev, 1201 struct drm_master *master) 1202 { 1203 struct vmw_master *vmaster = vmw_master(master); 1204 1205 master->driver_priv = NULL; 1206 kfree(vmaster); 1207 } 1208 1209 static int vmw_master_set(struct drm_device *dev, 1210 struct drm_file *file_priv, 1211 bool from_open) 1212 { 1213 struct vmw_private *dev_priv = vmw_priv(dev); 1214 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1215 struct vmw_master *active = dev_priv->active_master; 1216 struct vmw_master *vmaster = vmw_master(file_priv->master); 1217 int ret = 0; 1218 1219 if (active) { 1220 BUG_ON(active != &dev_priv->fbdev_master); 1221 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 1222 if (unlikely(ret != 0)) 1223 return ret; 1224 1225 ttm_lock_set_kill(&active->lock, true, SIGTERM); 1226 dev_priv->active_master = NULL; 1227 } 1228 1229 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1230 if (!from_open) { 1231 ttm_vt_unlock(&vmaster->lock); 1232 BUG_ON(vmw_fp->locked_master != file_priv->master); 1233 drm_master_put(&vmw_fp->locked_master); 1234 } 1235 1236 dev_priv->active_master = vmaster; 1237 1238 /* 1239 * Inform a new master that the layout may have changed while 1240 * it was gone. 1241 */ 1242 if (!from_open) 1243 drm_sysfs_hotplug_event(dev); 1244 1245 return 0; 1246 } 1247 1248 static void vmw_master_drop(struct drm_device *dev, 1249 struct drm_file *file_priv) 1250 { 1251 struct vmw_private *dev_priv = vmw_priv(dev); 1252 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1253 struct vmw_master *vmaster = vmw_master(file_priv->master); 1254 int ret; 1255 1256 /** 1257 * Make sure the master doesn't disappear while we have 1258 * it locked. 1259 */ 1260 1261 vmw_fp->locked_master = drm_master_get(file_priv->master); 1262 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 1263 vmw_kms_legacy_hotspot_clear(dev_priv); 1264 if (unlikely((ret != 0))) { 1265 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 1266 drm_master_put(&vmw_fp->locked_master); 1267 } 1268 1269 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1270 1271 if (!dev_priv->enable_fb) 1272 vmw_svga_disable(dev_priv); 1273 1274 dev_priv->active_master = &dev_priv->fbdev_master; 1275 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1276 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 1277 } 1278 1279 /** 1280 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1281 * 1282 * @dev_priv: Pointer to device private struct. 1283 * Needs the reservation sem to be held in non-exclusive mode. 1284 */ 1285 static void __vmw_svga_enable(struct vmw_private *dev_priv) 1286 { 1287 spin_lock(&dev_priv->svga_lock); 1288 if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1289 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); 1290 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true; 1291 } 1292 spin_unlock(&dev_priv->svga_lock); 1293 } 1294 1295 /** 1296 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1297 * 1298 * @dev_priv: Pointer to device private struct. 1299 */ 1300 void vmw_svga_enable(struct vmw_private *dev_priv) 1301 { 1302 (void) ttm_read_lock(&dev_priv->reservation_sem, false); 1303 __vmw_svga_enable(dev_priv); 1304 ttm_read_unlock(&dev_priv->reservation_sem); 1305 } 1306 1307 /** 1308 * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1309 * 1310 * @dev_priv: Pointer to device private struct. 1311 * Needs the reservation sem to be held in exclusive mode. 1312 * Will not empty VRAM. VRAM must be emptied by caller. 1313 */ 1314 static void __vmw_svga_disable(struct vmw_private *dev_priv) 1315 { 1316 spin_lock(&dev_priv->svga_lock); 1317 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1318 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 1319 vmw_write(dev_priv, SVGA_REG_ENABLE, 1320 SVGA_REG_ENABLE_HIDE | 1321 SVGA_REG_ENABLE_ENABLE); 1322 } 1323 spin_unlock(&dev_priv->svga_lock); 1324 } 1325 1326 /** 1327 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1328 * running. 1329 * 1330 * @dev_priv: Pointer to device private struct. 1331 * Will empty VRAM. 1332 */ 1333 void vmw_svga_disable(struct vmw_private *dev_priv) 1334 { 1335 /* 1336 * Disabling SVGA will turn off device modesetting capabilities, so 1337 * notify KMS about that so that it doesn't cache atomic state that 1338 * isn't valid anymore, for example crtcs turned on. 1339 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), 1340 * but vmw_kms_lost_device() takes the reservation sem and thus we'll 1341 * end up with lock order reversal. Thus, a master may actually perform 1342 * a new modeset just after we call vmw_kms_lost_device() and race with 1343 * vmw_svga_disable(), but that should at worst cause atomic KMS state 1344 * to be inconsistent with the device, causing modesetting problems. 1345 * 1346 */ 1347 vmw_kms_lost_device(dev_priv->dev); 1348 ttm_write_lock(&dev_priv->reservation_sem, false); 1349 spin_lock(&dev_priv->svga_lock); 1350 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1351 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 1352 spin_unlock(&dev_priv->svga_lock); 1353 if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) 1354 DRM_ERROR("Failed evicting VRAM buffers.\n"); 1355 vmw_write(dev_priv, SVGA_REG_ENABLE, 1356 SVGA_REG_ENABLE_HIDE | 1357 SVGA_REG_ENABLE_ENABLE); 1358 } else 1359 spin_unlock(&dev_priv->svga_lock); 1360 ttm_write_unlock(&dev_priv->reservation_sem); 1361 } 1362 1363 static void vmw_remove(struct pci_dev *pdev) 1364 { 1365 struct drm_device *dev = pci_get_drvdata(pdev); 1366 1367 pci_disable_device(pdev); 1368 drm_put_dev(dev); 1369 } 1370 1371 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1372 void *ptr) 1373 { 1374 struct vmw_private *dev_priv = 1375 container_of(nb, struct vmw_private, pm_nb); 1376 1377 switch (val) { 1378 case PM_HIBERNATION_PREPARE: 1379 /* 1380 * Take the reservation sem in write mode, which will make sure 1381 * there are no other processes holding a buffer object 1382 * reservation, meaning we should be able to evict all buffer 1383 * objects if needed. 1384 * Once user-space processes have been frozen, we can release 1385 * the lock again. 1386 */ 1387 ttm_suspend_lock(&dev_priv->reservation_sem); 1388 dev_priv->suspend_locked = true; 1389 break; 1390 case PM_POST_HIBERNATION: 1391 case PM_POST_RESTORE: 1392 if (READ_ONCE(dev_priv->suspend_locked)) { 1393 dev_priv->suspend_locked = false; 1394 ttm_suspend_unlock(&dev_priv->reservation_sem); 1395 } 1396 break; 1397 default: 1398 break; 1399 } 1400 return 0; 1401 } 1402 1403 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1404 { 1405 struct drm_device *dev = pci_get_drvdata(pdev); 1406 struct vmw_private *dev_priv = vmw_priv(dev); 1407 1408 if (dev_priv->refuse_hibernation) 1409 return -EBUSY; 1410 1411 pci_save_state(pdev); 1412 pci_disable_device(pdev); 1413 pci_set_power_state(pdev, PCI_D3hot); 1414 return 0; 1415 } 1416 1417 static int vmw_pci_resume(struct pci_dev *pdev) 1418 { 1419 pci_set_power_state(pdev, PCI_D0); 1420 pci_restore_state(pdev); 1421 return pci_enable_device(pdev); 1422 } 1423 1424 static int vmw_pm_suspend(struct device *kdev) 1425 { 1426 struct pci_dev *pdev = to_pci_dev(kdev); 1427 struct pm_message dummy; 1428 1429 dummy.event = 0; 1430 1431 return vmw_pci_suspend(pdev, dummy); 1432 } 1433 1434 static int vmw_pm_resume(struct device *kdev) 1435 { 1436 struct pci_dev *pdev = to_pci_dev(kdev); 1437 1438 return vmw_pci_resume(pdev); 1439 } 1440 1441 static int vmw_pm_freeze(struct device *kdev) 1442 { 1443 struct pci_dev *pdev = to_pci_dev(kdev); 1444 struct drm_device *dev = pci_get_drvdata(pdev); 1445 struct vmw_private *dev_priv = vmw_priv(dev); 1446 int ret; 1447 1448 /* 1449 * Unlock for vmw_kms_suspend. 1450 * No user-space processes should be running now. 1451 */ 1452 ttm_suspend_unlock(&dev_priv->reservation_sem); 1453 ret = vmw_kms_suspend(dev_priv->dev); 1454 if (ret) { 1455 ttm_suspend_lock(&dev_priv->reservation_sem); 1456 DRM_ERROR("Failed to freeze modesetting.\n"); 1457 return ret; 1458 } 1459 if (dev_priv->enable_fb) 1460 vmw_fb_off(dev_priv); 1461 1462 ttm_suspend_lock(&dev_priv->reservation_sem); 1463 vmw_execbuf_release_pinned_bo(dev_priv); 1464 vmw_resource_evict_all(dev_priv); 1465 vmw_release_device_early(dev_priv); 1466 ttm_bo_swapout_all(&dev_priv->bdev); 1467 if (dev_priv->enable_fb) 1468 vmw_fifo_resource_dec(dev_priv); 1469 if (atomic_read(&dev_priv->num_fifo_resources) != 0) { 1470 DRM_ERROR("Can't hibernate while 3D resources are active.\n"); 1471 if (dev_priv->enable_fb) 1472 vmw_fifo_resource_inc(dev_priv); 1473 WARN_ON(vmw_request_device_late(dev_priv)); 1474 dev_priv->suspend_locked = false; 1475 ttm_suspend_unlock(&dev_priv->reservation_sem); 1476 if (dev_priv->suspend_state) 1477 vmw_kms_resume(dev); 1478 if (dev_priv->enable_fb) 1479 vmw_fb_on(dev_priv); 1480 return -EBUSY; 1481 } 1482 1483 vmw_fence_fifo_down(dev_priv->fman); 1484 __vmw_svga_disable(dev_priv); 1485 1486 vmw_release_device_late(dev_priv); 1487 return 0; 1488 } 1489 1490 static int vmw_pm_restore(struct device *kdev) 1491 { 1492 struct pci_dev *pdev = to_pci_dev(kdev); 1493 struct drm_device *dev = pci_get_drvdata(pdev); 1494 struct vmw_private *dev_priv = vmw_priv(dev); 1495 int ret; 1496 1497 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1498 (void) vmw_read(dev_priv, SVGA_REG_ID); 1499 1500 if (dev_priv->enable_fb) 1501 vmw_fifo_resource_inc(dev_priv); 1502 1503 ret = vmw_request_device(dev_priv); 1504 if (ret) 1505 return ret; 1506 1507 if (dev_priv->enable_fb) 1508 __vmw_svga_enable(dev_priv); 1509 1510 vmw_fence_fifo_up(dev_priv->fman); 1511 dev_priv->suspend_locked = false; 1512 ttm_suspend_unlock(&dev_priv->reservation_sem); 1513 if (dev_priv->suspend_state) 1514 vmw_kms_resume(dev_priv->dev); 1515 1516 if (dev_priv->enable_fb) 1517 vmw_fb_on(dev_priv); 1518 1519 return 0; 1520 } 1521 1522 static const struct dev_pm_ops vmw_pm_ops = { 1523 .freeze = vmw_pm_freeze, 1524 .thaw = vmw_pm_restore, 1525 .restore = vmw_pm_restore, 1526 .suspend = vmw_pm_suspend, 1527 .resume = vmw_pm_resume, 1528 }; 1529 1530 static const struct file_operations vmwgfx_driver_fops = { 1531 .owner = THIS_MODULE, 1532 .open = drm_open, 1533 .release = drm_release, 1534 .unlocked_ioctl = vmw_unlocked_ioctl, 1535 .mmap = vmw_mmap, 1536 .poll = vmw_fops_poll, 1537 .read = vmw_fops_read, 1538 #if defined(CONFIG_COMPAT) 1539 .compat_ioctl = vmw_compat_ioctl, 1540 #endif 1541 .llseek = noop_llseek, 1542 }; 1543 1544 static struct drm_driver driver = { 1545 .driver_features = 1546 DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, 1547 .load = vmw_driver_load, 1548 .unload = vmw_driver_unload, 1549 .get_vblank_counter = vmw_get_vblank_counter, 1550 .enable_vblank = vmw_enable_vblank, 1551 .disable_vblank = vmw_disable_vblank, 1552 .ioctls = vmw_ioctls, 1553 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1554 .master_create = vmw_master_create, 1555 .master_destroy = vmw_master_destroy, 1556 .master_set = vmw_master_set, 1557 .master_drop = vmw_master_drop, 1558 .open = vmw_driver_open, 1559 .postclose = vmw_postclose, 1560 1561 .dumb_create = vmw_dumb_create, 1562 .dumb_map_offset = vmw_dumb_map_offset, 1563 .dumb_destroy = vmw_dumb_destroy, 1564 1565 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1566 .prime_handle_to_fd = vmw_prime_handle_to_fd, 1567 1568 .fops = &vmwgfx_driver_fops, 1569 .name = VMWGFX_DRIVER_NAME, 1570 .desc = VMWGFX_DRIVER_DESC, 1571 .date = VMWGFX_DRIVER_DATE, 1572 .major = VMWGFX_DRIVER_MAJOR, 1573 .minor = VMWGFX_DRIVER_MINOR, 1574 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL 1575 }; 1576 1577 static struct pci_driver vmw_pci_driver = { 1578 .name = VMWGFX_DRIVER_NAME, 1579 .id_table = vmw_pci_id_list, 1580 .probe = vmw_probe, 1581 .remove = vmw_remove, 1582 .driver = { 1583 .pm = &vmw_pm_ops 1584 } 1585 }; 1586 1587 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1588 { 1589 return drm_get_pci_dev(pdev, ent, &driver); 1590 } 1591 1592 static int __init vmwgfx_init(void) 1593 { 1594 int ret; 1595 1596 if (vgacon_text_force()) 1597 return -EINVAL; 1598 1599 ret = pci_register_driver(&vmw_pci_driver); 1600 if (ret) 1601 DRM_ERROR("Failed initializing DRM.\n"); 1602 return ret; 1603 } 1604 1605 static void __exit vmwgfx_exit(void) 1606 { 1607 pci_unregister_driver(&vmw_pci_driver); 1608 } 1609 1610 module_init(vmwgfx_init); 1611 module_exit(vmwgfx_exit); 1612 1613 MODULE_AUTHOR("VMware Inc. and others"); 1614 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); 1615 MODULE_LICENSE("GPL and additional rights"); 1616 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." 1617 __stringify(VMWGFX_DRIVER_MINOR) "." 1618 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 1619 "0"); 1620