1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 #include <linux/console.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/module.h> 31 #include <linux/pci.h> 32 #include <linux/mem_encrypt.h> 33 34 #include <drm/drm_drv.h> 35 #include <drm/drm_fb_helper.h> 36 #include <drm/drm_ioctl.h> 37 #include <drm/drm_sysfs.h> 38 #include <drm/ttm/ttm_bo_driver.h> 39 #include <drm/ttm/ttm_placement.h> 40 41 #include "ttm_object.h" 42 #include "vmwgfx_binding.h" 43 #include "vmwgfx_drv.h" 44 45 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 46 47 #define VMW_MIN_INITIAL_WIDTH 800 48 #define VMW_MIN_INITIAL_HEIGHT 600 49 50 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) 51 52 53 /** 54 * Fully encoded drm commands. Might move to vmw_drm.h 55 */ 56 57 #define DRM_IOCTL_VMW_GET_PARAM \ 58 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 59 struct drm_vmw_getparam_arg) 60 #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 61 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 62 union drm_vmw_alloc_dmabuf_arg) 63 #define DRM_IOCTL_VMW_UNREF_DMABUF \ 64 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 65 struct drm_vmw_unref_dmabuf_arg) 66 #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 67 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 68 struct drm_vmw_cursor_bypass_arg) 69 70 #define DRM_IOCTL_VMW_CONTROL_STREAM \ 71 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 72 struct drm_vmw_control_stream_arg) 73 #define DRM_IOCTL_VMW_CLAIM_STREAM \ 74 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 75 struct drm_vmw_stream_arg) 76 #define DRM_IOCTL_VMW_UNREF_STREAM \ 77 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 78 struct drm_vmw_stream_arg) 79 80 #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 81 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 82 struct drm_vmw_context_arg) 83 #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 84 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 85 struct drm_vmw_context_arg) 86 #define DRM_IOCTL_VMW_CREATE_SURFACE \ 87 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 88 union drm_vmw_surface_create_arg) 89 #define DRM_IOCTL_VMW_UNREF_SURFACE \ 90 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 91 struct drm_vmw_surface_arg) 92 #define DRM_IOCTL_VMW_REF_SURFACE \ 93 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 94 union drm_vmw_surface_reference_arg) 95 #define DRM_IOCTL_VMW_EXECBUF \ 96 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 97 struct drm_vmw_execbuf_arg) 98 #define DRM_IOCTL_VMW_GET_3D_CAP \ 99 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 100 struct drm_vmw_get_3d_cap_arg) 101 #define DRM_IOCTL_VMW_FENCE_WAIT \ 102 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 103 struct drm_vmw_fence_wait_arg) 104 #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 105 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 106 struct drm_vmw_fence_signaled_arg) 107 #define DRM_IOCTL_VMW_FENCE_UNREF \ 108 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 109 struct drm_vmw_fence_arg) 110 #define DRM_IOCTL_VMW_FENCE_EVENT \ 111 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 112 struct drm_vmw_fence_event_arg) 113 #define DRM_IOCTL_VMW_PRESENT \ 114 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 115 struct drm_vmw_present_arg) 116 #define DRM_IOCTL_VMW_PRESENT_READBACK \ 117 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 118 struct drm_vmw_present_readback_arg) 119 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 120 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 121 struct drm_vmw_update_layout_arg) 122 #define DRM_IOCTL_VMW_CREATE_SHADER \ 123 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 124 struct drm_vmw_shader_create_arg) 125 #define DRM_IOCTL_VMW_UNREF_SHADER \ 126 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 127 struct drm_vmw_shader_arg) 128 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 129 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 130 union drm_vmw_gb_surface_create_arg) 131 #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 132 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 133 union drm_vmw_gb_surface_reference_arg) 134 #define DRM_IOCTL_VMW_SYNCCPU \ 135 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 136 struct drm_vmw_synccpu_arg) 137 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 138 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 139 struct drm_vmw_context_arg) 140 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ 141 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ 142 union drm_vmw_gb_surface_create_ext_arg) 143 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ 144 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ 145 union drm_vmw_gb_surface_reference_ext_arg) 146 #define DRM_IOCTL_VMW_MSG \ 147 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ 148 struct drm_vmw_msg_arg) 149 150 /* 151 * The core DRM version of this macro doesn't account for 152 * DRM_COMMAND_BASE. 153 */ 154 155 #define VMW_IOCTL_DEF(ioctl, func, flags) \ 156 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} 157 158 /* 159 * Ioctl definitions. 160 */ 161 162 static const struct drm_ioctl_desc vmw_ioctls[] = { 163 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 164 DRM_RENDER_ALLOW), 165 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, 166 DRM_RENDER_ALLOW), 167 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, 168 DRM_RENDER_ALLOW), 169 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, 170 vmw_kms_cursor_bypass_ioctl, 171 DRM_MASTER), 172 173 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 174 DRM_MASTER), 175 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 176 DRM_MASTER), 177 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 178 DRM_MASTER), 179 180 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 181 DRM_RENDER_ALLOW), 182 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 183 DRM_RENDER_ALLOW), 184 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 185 DRM_RENDER_ALLOW), 186 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 187 DRM_RENDER_ALLOW), 188 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 189 DRM_RENDER_ALLOW), 190 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, 191 DRM_RENDER_ALLOW), 192 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 193 DRM_RENDER_ALLOW), 194 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 195 vmw_fence_obj_signaled_ioctl, 196 DRM_RENDER_ALLOW), 197 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 198 DRM_RENDER_ALLOW), 199 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 200 DRM_RENDER_ALLOW), 201 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 202 DRM_RENDER_ALLOW), 203 204 /* these allow direct access to the framebuffers mark as master only */ 205 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, 206 DRM_MASTER | DRM_AUTH), 207 VMW_IOCTL_DEF(VMW_PRESENT_READBACK, 208 vmw_present_readback_ioctl, 209 DRM_MASTER | DRM_AUTH), 210 /* 211 * The permissions of the below ioctl are overridden in 212 * vmw_generic_ioctl(). We require either 213 * DRM_MASTER or capable(CAP_SYS_ADMIN). 214 */ 215 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, 216 vmw_kms_update_layout_ioctl, 217 DRM_RENDER_ALLOW), 218 VMW_IOCTL_DEF(VMW_CREATE_SHADER, 219 vmw_shader_define_ioctl, 220 DRM_RENDER_ALLOW), 221 VMW_IOCTL_DEF(VMW_UNREF_SHADER, 222 vmw_shader_destroy_ioctl, 223 DRM_RENDER_ALLOW), 224 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, 225 vmw_gb_surface_define_ioctl, 226 DRM_RENDER_ALLOW), 227 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, 228 vmw_gb_surface_reference_ioctl, 229 DRM_RENDER_ALLOW), 230 VMW_IOCTL_DEF(VMW_SYNCCPU, 231 vmw_user_bo_synccpu_ioctl, 232 DRM_RENDER_ALLOW), 233 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, 234 vmw_extended_context_define_ioctl, 235 DRM_RENDER_ALLOW), 236 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT, 237 vmw_gb_surface_define_ext_ioctl, 238 DRM_RENDER_ALLOW), 239 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT, 240 vmw_gb_surface_reference_ext_ioctl, 241 DRM_RENDER_ALLOW), 242 VMW_IOCTL_DEF(VMW_MSG, 243 vmw_msg_ioctl, 244 DRM_RENDER_ALLOW), 245 }; 246 247 static const struct pci_device_id vmw_pci_id_list[] = { 248 { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, 249 { } 250 }; 251 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 252 253 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 254 static int vmw_force_iommu; 255 static int vmw_restrict_iommu; 256 static int vmw_force_coherent; 257 static int vmw_restrict_dma_mask; 258 static int vmw_assume_16bpp; 259 260 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 261 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 262 void *ptr); 263 264 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 265 module_param_named(enable_fbdev, enable_fbdev, int, 0600); 266 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); 267 module_param_named(force_dma_api, vmw_force_iommu, int, 0600); 268 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 269 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 270 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 271 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 272 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 273 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 274 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 275 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 276 277 278 static void vmw_print_capabilities2(uint32_t capabilities2) 279 { 280 DRM_INFO("Capabilities2:\n"); 281 if (capabilities2 & SVGA_CAP2_GROW_OTABLE) 282 DRM_INFO(" Grow oTable.\n"); 283 if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY) 284 DRM_INFO(" IntraSurface copy.\n"); 285 if (capabilities2 & SVGA_CAP2_DX3) 286 DRM_INFO(" DX3.\n"); 287 } 288 289 static void vmw_print_capabilities(uint32_t capabilities) 290 { 291 DRM_INFO("Capabilities:\n"); 292 if (capabilities & SVGA_CAP_RECT_COPY) 293 DRM_INFO(" Rect copy.\n"); 294 if (capabilities & SVGA_CAP_CURSOR) 295 DRM_INFO(" Cursor.\n"); 296 if (capabilities & SVGA_CAP_CURSOR_BYPASS) 297 DRM_INFO(" Cursor bypass.\n"); 298 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) 299 DRM_INFO(" Cursor bypass 2.\n"); 300 if (capabilities & SVGA_CAP_8BIT_EMULATION) 301 DRM_INFO(" 8bit emulation.\n"); 302 if (capabilities & SVGA_CAP_ALPHA_CURSOR) 303 DRM_INFO(" Alpha cursor.\n"); 304 if (capabilities & SVGA_CAP_3D) 305 DRM_INFO(" 3D.\n"); 306 if (capabilities & SVGA_CAP_EXTENDED_FIFO) 307 DRM_INFO(" Extended Fifo.\n"); 308 if (capabilities & SVGA_CAP_MULTIMON) 309 DRM_INFO(" Multimon.\n"); 310 if (capabilities & SVGA_CAP_PITCHLOCK) 311 DRM_INFO(" Pitchlock.\n"); 312 if (capabilities & SVGA_CAP_IRQMASK) 313 DRM_INFO(" Irq mask.\n"); 314 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) 315 DRM_INFO(" Display Topology.\n"); 316 if (capabilities & SVGA_CAP_GMR) 317 DRM_INFO(" GMR.\n"); 318 if (capabilities & SVGA_CAP_TRACES) 319 DRM_INFO(" Traces.\n"); 320 if (capabilities & SVGA_CAP_GMR2) 321 DRM_INFO(" GMR2.\n"); 322 if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) 323 DRM_INFO(" Screen Object 2.\n"); 324 if (capabilities & SVGA_CAP_COMMAND_BUFFERS) 325 DRM_INFO(" Command Buffers.\n"); 326 if (capabilities & SVGA_CAP_CMD_BUFFERS_2) 327 DRM_INFO(" Command Buffers 2.\n"); 328 if (capabilities & SVGA_CAP_GBOBJECTS) 329 DRM_INFO(" Guest Backed Resources.\n"); 330 if (capabilities & SVGA_CAP_DX) 331 DRM_INFO(" DX Features.\n"); 332 if (capabilities & SVGA_CAP_HP_CMD_QUEUE) 333 DRM_INFO(" HP Command Queue.\n"); 334 } 335 336 /** 337 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result 338 * 339 * @dev_priv: A device private structure. 340 * 341 * This function creates a small buffer object that holds the query 342 * result for dummy queries emitted as query barriers. 343 * The function will then map the first page and initialize a pending 344 * occlusion query result structure, Finally it will unmap the buffer. 345 * No interruptible waits are done within this function. 346 * 347 * Returns an error if bo creation or initialization fails. 348 */ 349 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 350 { 351 int ret; 352 struct vmw_buffer_object *vbo; 353 struct ttm_bo_kmap_obj map; 354 volatile SVGA3dQueryResult *result; 355 bool dummy; 356 357 /* 358 * Create the vbo as pinned, so that a tryreserve will 359 * immediately succeed. This is because we're the only 360 * user of the bo currently. 361 */ 362 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); 363 if (!vbo) 364 return -ENOMEM; 365 366 ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, 367 &vmw_sys_placement, false, true, 368 &vmw_bo_bo_free); 369 if (unlikely(ret != 0)) 370 return ret; 371 372 ret = ttm_bo_reserve(&vbo->base, false, true, NULL); 373 BUG_ON(ret != 0); 374 vmw_bo_pin_reserved(vbo, true); 375 376 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); 377 if (likely(ret == 0)) { 378 result = ttm_kmap_obj_virtual(&map, &dummy); 379 result->totalSize = sizeof(*result); 380 result->state = SVGA3D_QUERYSTATE_PENDING; 381 result->result32 = 0xff; 382 ttm_bo_kunmap(&map); 383 } 384 vmw_bo_pin_reserved(vbo, false); 385 ttm_bo_unreserve(&vbo->base); 386 387 if (unlikely(ret != 0)) { 388 DRM_ERROR("Dummy query buffer map failed.\n"); 389 vmw_bo_unreference(&vbo); 390 } else 391 dev_priv->dummy_query_bo = vbo; 392 393 return ret; 394 } 395 396 /** 397 * vmw_request_device_late - Perform late device setup 398 * 399 * @dev_priv: Pointer to device private. 400 * 401 * This function performs setup of otables and enables large command 402 * buffer submission. These tasks are split out to a separate function 403 * because it reverts vmw_release_device_early and is intended to be used 404 * by an error path in the hibernation code. 405 */ 406 static int vmw_request_device_late(struct vmw_private *dev_priv) 407 { 408 int ret; 409 410 if (dev_priv->has_mob) { 411 ret = vmw_otables_setup(dev_priv); 412 if (unlikely(ret != 0)) { 413 DRM_ERROR("Unable to initialize " 414 "guest Memory OBjects.\n"); 415 return ret; 416 } 417 } 418 419 if (dev_priv->cman) { 420 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096); 421 if (ret) { 422 struct vmw_cmdbuf_man *man = dev_priv->cman; 423 424 dev_priv->cman = NULL; 425 vmw_cmdbuf_man_destroy(man); 426 } 427 } 428 429 return 0; 430 } 431 432 static int vmw_request_device(struct vmw_private *dev_priv) 433 { 434 int ret; 435 436 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 437 if (unlikely(ret != 0)) { 438 DRM_ERROR("Unable to initialize FIFO.\n"); 439 return ret; 440 } 441 vmw_fence_fifo_up(dev_priv->fman); 442 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 443 if (IS_ERR(dev_priv->cman)) { 444 dev_priv->cman = NULL; 445 dev_priv->sm_type = VMW_SM_LEGACY; 446 } 447 448 ret = vmw_request_device_late(dev_priv); 449 if (ret) 450 goto out_no_mob; 451 452 ret = vmw_dummy_query_bo_create(dev_priv); 453 if (unlikely(ret != 0)) 454 goto out_no_query_bo; 455 456 return 0; 457 458 out_no_query_bo: 459 if (dev_priv->cman) 460 vmw_cmdbuf_remove_pool(dev_priv->cman); 461 if (dev_priv->has_mob) { 462 struct ttm_resource_manager *man; 463 464 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 465 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 466 vmw_otables_takedown(dev_priv); 467 } 468 if (dev_priv->cman) 469 vmw_cmdbuf_man_destroy(dev_priv->cman); 470 out_no_mob: 471 vmw_fence_fifo_down(dev_priv->fman); 472 vmw_fifo_release(dev_priv, &dev_priv->fifo); 473 return ret; 474 } 475 476 /** 477 * vmw_release_device_early - Early part of fifo takedown. 478 * 479 * @dev_priv: Pointer to device private struct. 480 * 481 * This is the first part of command submission takedown, to be called before 482 * buffer management is taken down. 483 */ 484 static void vmw_release_device_early(struct vmw_private *dev_priv) 485 { 486 /* 487 * Previous destructions should've released 488 * the pinned bo. 489 */ 490 491 BUG_ON(dev_priv->pinned_bo != NULL); 492 493 vmw_bo_unreference(&dev_priv->dummy_query_bo); 494 if (dev_priv->cman) 495 vmw_cmdbuf_remove_pool(dev_priv->cman); 496 497 if (dev_priv->has_mob) { 498 struct ttm_resource_manager *man; 499 500 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 501 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 502 vmw_otables_takedown(dev_priv); 503 } 504 } 505 506 /** 507 * vmw_release_device_late - Late part of fifo takedown. 508 * 509 * @dev_priv: Pointer to device private struct. 510 * 511 * This is the last part of the command submission takedown, to be called when 512 * command submission is no longer needed. It may wait on pending fences. 513 */ 514 static void vmw_release_device_late(struct vmw_private *dev_priv) 515 { 516 vmw_fence_fifo_down(dev_priv->fman); 517 if (dev_priv->cman) 518 vmw_cmdbuf_man_destroy(dev_priv->cman); 519 520 vmw_fifo_release(dev_priv, &dev_priv->fifo); 521 } 522 523 /* 524 * Sets the initial_[width|height] fields on the given vmw_private. 525 * 526 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then 527 * clamping the value to fb_max_[width|height] fields and the 528 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 529 * If the values appear to be invalid, set them to 530 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 531 */ 532 static void vmw_get_initial_size(struct vmw_private *dev_priv) 533 { 534 uint32_t width; 535 uint32_t height; 536 537 width = vmw_read(dev_priv, SVGA_REG_WIDTH); 538 height = vmw_read(dev_priv, SVGA_REG_HEIGHT); 539 540 width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); 541 height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); 542 543 if (width > dev_priv->fb_max_width || 544 height > dev_priv->fb_max_height) { 545 546 /* 547 * This is a host error and shouldn't occur. 548 */ 549 550 width = VMW_MIN_INITIAL_WIDTH; 551 height = VMW_MIN_INITIAL_HEIGHT; 552 } 553 554 dev_priv->initial_width = width; 555 dev_priv->initial_height = height; 556 } 557 558 /** 559 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 560 * system. 561 * 562 * @dev_priv: Pointer to a struct vmw_private 563 * 564 * This functions tries to determine what actions need to be taken by the 565 * driver to make system pages visible to the device. 566 * If this function decides that DMA is not possible, it returns -EINVAL. 567 * The driver may then try to disable features of the device that require 568 * DMA. 569 */ 570 static int vmw_dma_select_mode(struct vmw_private *dev_priv) 571 { 572 static const char *names[vmw_dma_map_max] = { 573 [vmw_dma_phys] = "Using physical TTM page addresses.", 574 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 575 [vmw_dma_map_populate] = "Caching DMA mappings.", 576 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 577 578 /* TTM currently doesn't fully support SEV encryption. */ 579 if (mem_encrypt_active()) 580 return -EINVAL; 581 582 if (vmw_force_coherent) 583 dev_priv->map_mode = vmw_dma_alloc_coherent; 584 else if (vmw_restrict_iommu) 585 dev_priv->map_mode = vmw_dma_map_bind; 586 else 587 dev_priv->map_mode = vmw_dma_map_populate; 588 589 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 590 return 0; 591 } 592 593 /** 594 * vmw_dma_masks - set required page- and dma masks 595 * 596 * @dev_priv: Pointer to struct drm-device 597 * 598 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 599 * restriction also for 64-bit systems. 600 */ 601 static int vmw_dma_masks(struct vmw_private *dev_priv) 602 { 603 struct drm_device *dev = &dev_priv->drm; 604 int ret = 0; 605 606 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 607 if (dev_priv->map_mode != vmw_dma_phys && 608 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 609 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 610 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 611 } 612 613 return ret; 614 } 615 616 static int vmw_vram_manager_init(struct vmw_private *dev_priv) 617 { 618 int ret; 619 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 620 ret = vmw_thp_init(dev_priv); 621 #else 622 ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false, 623 dev_priv->vram_size >> PAGE_SHIFT); 624 #endif 625 ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); 626 return ret; 627 } 628 629 static void vmw_vram_manager_fini(struct vmw_private *dev_priv) 630 { 631 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 632 vmw_thp_fini(dev_priv); 633 #else 634 ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM); 635 #endif 636 } 637 638 static int vmw_setup_pci_resources(struct vmw_private *dev, 639 unsigned long pci_id) 640 { 641 resource_size_t fifo_start; 642 resource_size_t fifo_size; 643 int ret; 644 struct pci_dev *pdev = to_pci_dev(dev->drm.dev); 645 646 pci_set_master(pdev); 647 648 ret = pci_request_regions(pdev, "vmwgfx probe"); 649 if (ret) 650 return ret; 651 652 dev->io_start = pci_resource_start(pdev, 0); 653 dev->vram_start = pci_resource_start(pdev, 1); 654 dev->vram_size = pci_resource_len(pdev, 1); 655 fifo_start = pci_resource_start(pdev, 2); 656 fifo_size = pci_resource_len(pdev, 2); 657 658 DRM_INFO("FIFO at %pa size is %llu kiB\n", 659 &fifo_start, (uint64_t)fifo_size / 1024); 660 dev->fifo_mem = devm_memremap(dev->drm.dev, 661 fifo_start, 662 fifo_size, 663 MEMREMAP_WB); 664 665 if (IS_ERR(dev->fifo_mem)) { 666 DRM_ERROR("Failed mapping FIFO memory.\n"); 667 pci_release_regions(pdev); 668 return PTR_ERR(dev->fifo_mem); 669 } 670 671 /* 672 * This is approximate size of the vram, the exact size will only 673 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource 674 * size will be equal to or bigger than the size reported by 675 * SVGA_REG_VRAM_SIZE. 676 */ 677 DRM_INFO("VRAM at %pa size is %llu kiB\n", 678 &dev->vram_start, (uint64_t)dev->vram_size / 1024); 679 680 return 0; 681 } 682 683 static int vmw_detect_version(struct vmw_private *dev) 684 { 685 uint32_t svga_id; 686 687 vmw_write(dev, SVGA_REG_ID, SVGA_ID_2); 688 svga_id = vmw_read(dev, SVGA_REG_ID); 689 if (svga_id != SVGA_ID_2) { 690 DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n", 691 svga_id, dev->vmw_chipset); 692 return -ENOSYS; 693 } 694 return 0; 695 } 696 697 static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) 698 { 699 int ret; 700 enum vmw_res_type i; 701 bool refuse_dma = false; 702 char host_log[100] = {0}; 703 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 704 705 dev_priv->vmw_chipset = pci_id; 706 dev_priv->last_read_seqno = (uint32_t) -100; 707 dev_priv->drm.dev_private = dev_priv; 708 709 ret = vmw_setup_pci_resources(dev_priv, pci_id); 710 if (ret) 711 return ret; 712 ret = vmw_detect_version(dev_priv); 713 if (ret) 714 goto out_no_pci_or_version; 715 716 mutex_init(&dev_priv->cmdbuf_mutex); 717 mutex_init(&dev_priv->release_mutex); 718 mutex_init(&dev_priv->binding_mutex); 719 mutex_init(&dev_priv->global_kms_state_mutex); 720 ttm_lock_init(&dev_priv->reservation_sem); 721 spin_lock_init(&dev_priv->resource_lock); 722 spin_lock_init(&dev_priv->hw_lock); 723 spin_lock_init(&dev_priv->waiter_lock); 724 spin_lock_init(&dev_priv->cap_lock); 725 spin_lock_init(&dev_priv->cursor_lock); 726 727 for (i = vmw_res_context; i < vmw_res_max; ++i) { 728 idr_init(&dev_priv->res_idr[i]); 729 INIT_LIST_HEAD(&dev_priv->res_lru[i]); 730 } 731 732 init_waitqueue_head(&dev_priv->fence_queue); 733 init_waitqueue_head(&dev_priv->fifo_queue); 734 dev_priv->fence_queue_waiters = 0; 735 dev_priv->fifo_queue_waiters = 0; 736 737 dev_priv->used_memory_size = 0; 738 739 dev_priv->assume_16bpp = !!vmw_assume_16bpp; 740 741 dev_priv->enable_fb = enable_fbdev; 742 743 744 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 745 746 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) { 747 dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); 748 } 749 750 751 ret = vmw_dma_select_mode(dev_priv); 752 if (unlikely(ret != 0)) { 753 DRM_INFO("Restricting capabilities since DMA not available.\n"); 754 refuse_dma = true; 755 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) 756 DRM_INFO("Disabling 3D acceleration.\n"); 757 } 758 759 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 760 dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 761 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 762 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 763 764 vmw_get_initial_size(dev_priv); 765 766 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 767 dev_priv->max_gmr_ids = 768 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 769 dev_priv->max_gmr_pages = 770 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 771 dev_priv->memory_size = 772 vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 773 dev_priv->memory_size -= dev_priv->vram_size; 774 } else { 775 /* 776 * An arbitrary limit of 512MiB on surface 777 * memory. But all HWV8 hardware supports GMR2. 778 */ 779 dev_priv->memory_size = 512*1024*1024; 780 } 781 dev_priv->max_mob_pages = 0; 782 dev_priv->max_mob_size = 0; 783 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 784 uint64_t mem_size; 785 786 if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2) 787 mem_size = vmw_read(dev_priv, 788 SVGA_REG_GBOBJECT_MEM_SIZE_KB); 789 else 790 mem_size = 791 vmw_read(dev_priv, 792 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 793 794 /* 795 * Workaround for low memory 2D VMs to compensate for the 796 * allocation taken by fbdev 797 */ 798 if (!(dev_priv->capabilities & SVGA_CAP_3D)) 799 mem_size *= 3; 800 801 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 802 dev_priv->prim_bb_mem = 803 vmw_read(dev_priv, 804 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 805 dev_priv->max_mob_size = 806 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 807 dev_priv->stdu_max_width = 808 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 809 dev_priv->stdu_max_height = 810 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 811 812 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 813 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 814 dev_priv->texture_max_width = vmw_read(dev_priv, 815 SVGA_REG_DEV_CAP); 816 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 817 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 818 dev_priv->texture_max_height = vmw_read(dev_priv, 819 SVGA_REG_DEV_CAP); 820 } else { 821 dev_priv->texture_max_width = 8192; 822 dev_priv->texture_max_height = 8192; 823 dev_priv->prim_bb_mem = dev_priv->vram_size; 824 } 825 826 vmw_print_capabilities(dev_priv->capabilities); 827 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) 828 vmw_print_capabilities2(dev_priv->capabilities2); 829 830 ret = vmw_dma_masks(dev_priv); 831 if (unlikely(ret != 0)) 832 goto out_err0; 833 834 dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX); 835 836 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 837 DRM_INFO("Max GMR ids is %u\n", 838 (unsigned)dev_priv->max_gmr_ids); 839 DRM_INFO("Max number of GMR pages is %u\n", 840 (unsigned)dev_priv->max_gmr_pages); 841 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", 842 (unsigned)dev_priv->memory_size / 1024); 843 } 844 DRM_INFO("Maximum display memory size is %llu kiB\n", 845 (uint64_t)dev_priv->prim_bb_mem / 1024); 846 847 /* Need mmio memory to check for fifo pitchlock cap. */ 848 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 849 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 850 !vmw_fifo_have_pitchlock(dev_priv)) { 851 ret = -ENOSYS; 852 DRM_ERROR("Hardware has no pitchlock\n"); 853 goto out_err0; 854 } 855 856 dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, 857 &vmw_prime_dmabuf_ops); 858 859 if (unlikely(dev_priv->tdev == NULL)) { 860 DRM_ERROR("Unable to initialize TTM object management.\n"); 861 ret = -ENOMEM; 862 goto out_err0; 863 } 864 865 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 866 ret = vmw_irq_install(&dev_priv->drm, pdev->irq); 867 if (ret != 0) { 868 DRM_ERROR("Failed installing irq: %d\n", ret); 869 goto out_no_irq; 870 } 871 } 872 873 dev_priv->fman = vmw_fence_manager_init(dev_priv); 874 if (unlikely(dev_priv->fman == NULL)) { 875 ret = -ENOMEM; 876 goto out_no_fman; 877 } 878 879 drm_vma_offset_manager_init(&dev_priv->vma_manager, 880 DRM_FILE_PAGE_OFFSET_START, 881 DRM_FILE_PAGE_OFFSET_SIZE); 882 ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, 883 dev_priv->drm.dev, 884 dev_priv->drm.anon_inode->i_mapping, 885 &dev_priv->vma_manager, 886 dev_priv->map_mode == vmw_dma_alloc_coherent, 887 false); 888 if (unlikely(ret != 0)) { 889 DRM_ERROR("Failed initializing TTM buffer object driver.\n"); 890 goto out_no_bdev; 891 } 892 893 /* 894 * Enable VRAM, but initially don't use it until SVGA is enabled and 895 * unhidden. 896 */ 897 898 ret = vmw_vram_manager_init(dev_priv); 899 if (unlikely(ret != 0)) { 900 DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 901 goto out_no_vram; 902 } 903 904 /* 905 * "Guest Memory Regions" is an aperture like feature with 906 * one slot per bo. There is an upper limit of the number of 907 * slots as well as the bo size. 908 */ 909 dev_priv->has_gmr = true; 910 /* TODO: This is most likely not correct */ 911 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 912 refuse_dma || 913 vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) { 914 DRM_INFO("No GMR memory available. " 915 "Graphics memory resources are very limited.\n"); 916 dev_priv->has_gmr = false; 917 } 918 919 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) { 920 dev_priv->has_mob = true; 921 922 if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) { 923 DRM_INFO("No MOB memory available. " 924 "3D will be disabled.\n"); 925 dev_priv->has_mob = false; 926 } 927 } 928 929 if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { 930 spin_lock(&dev_priv->cap_lock); 931 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); 932 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) 933 dev_priv->sm_type = VMW_SM_4; 934 spin_unlock(&dev_priv->cap_lock); 935 } 936 937 vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); 938 939 /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ 940 if (has_sm4_context(dev_priv) && 941 (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { 942 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41); 943 944 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) 945 dev_priv->sm_type = VMW_SM_4_1; 946 947 if (has_sm4_1_context(dev_priv) && 948 (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { 949 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5); 950 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) 951 dev_priv->sm_type = VMW_SM_5; 952 } 953 } 954 955 ret = vmw_kms_init(dev_priv); 956 if (unlikely(ret != 0)) 957 goto out_no_kms; 958 vmw_overlay_init(dev_priv); 959 960 ret = vmw_request_device(dev_priv); 961 if (ret) 962 goto out_no_fifo; 963 964 if (dev_priv->sm_type == VMW_SM_5) 965 DRM_INFO("SM5 support available.\n"); 966 if (dev_priv->sm_type == VMW_SM_4_1) 967 DRM_INFO("SM4_1 support available.\n"); 968 if (dev_priv->sm_type == VMW_SM_4) 969 DRM_INFO("SM4 support available.\n"); 970 971 snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", 972 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 973 VMWGFX_DRIVER_PATCHLEVEL); 974 vmw_host_log(host_log); 975 976 if (dev_priv->enable_fb) { 977 vmw_fifo_resource_inc(dev_priv); 978 vmw_svga_enable(dev_priv); 979 vmw_fb_init(dev_priv); 980 } 981 982 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 983 register_pm_notifier(&dev_priv->pm_nb); 984 985 return 0; 986 987 out_no_fifo: 988 vmw_overlay_close(dev_priv); 989 vmw_kms_close(dev_priv); 990 out_no_kms: 991 if (dev_priv->has_mob) 992 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 993 if (dev_priv->has_gmr) 994 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 995 vmw_vram_manager_fini(dev_priv); 996 out_no_vram: 997 ttm_device_fini(&dev_priv->bdev); 998 out_no_bdev: 999 vmw_fence_manager_takedown(dev_priv->fman); 1000 out_no_fman: 1001 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1002 vmw_irq_uninstall(&dev_priv->drm); 1003 out_no_irq: 1004 ttm_object_device_release(&dev_priv->tdev); 1005 out_err0: 1006 for (i = vmw_res_context; i < vmw_res_max; ++i) 1007 idr_destroy(&dev_priv->res_idr[i]); 1008 1009 if (dev_priv->ctx.staged_bindings) 1010 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1011 out_no_pci_or_version: 1012 pci_release_regions(pdev); 1013 return ret; 1014 } 1015 1016 static void vmw_driver_unload(struct drm_device *dev) 1017 { 1018 struct vmw_private *dev_priv = vmw_priv(dev); 1019 struct pci_dev *pdev = to_pci_dev(dev->dev); 1020 enum vmw_res_type i; 1021 1022 unregister_pm_notifier(&dev_priv->pm_nb); 1023 1024 if (dev_priv->ctx.res_ht_initialized) 1025 drm_ht_remove(&dev_priv->ctx.res_ht); 1026 vfree(dev_priv->ctx.cmd_bounce); 1027 if (dev_priv->enable_fb) { 1028 vmw_fb_off(dev_priv); 1029 vmw_fb_close(dev_priv); 1030 vmw_fifo_resource_dec(dev_priv); 1031 vmw_svga_disable(dev_priv); 1032 } 1033 1034 vmw_kms_close(dev_priv); 1035 vmw_overlay_close(dev_priv); 1036 1037 if (dev_priv->has_gmr) 1038 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1039 1040 vmw_release_device_early(dev_priv); 1041 if (dev_priv->has_mob) 1042 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1043 vmw_vram_manager_fini(dev_priv); 1044 ttm_device_fini(&dev_priv->bdev); 1045 drm_vma_offset_manager_destroy(&dev_priv->vma_manager); 1046 vmw_release_device_late(dev_priv); 1047 vmw_fence_manager_takedown(dev_priv->fman); 1048 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1049 vmw_irq_uninstall(&dev_priv->drm); 1050 1051 ttm_object_device_release(&dev_priv->tdev); 1052 if (dev_priv->ctx.staged_bindings) 1053 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1054 1055 for (i = vmw_res_context; i < vmw_res_max; ++i) 1056 idr_destroy(&dev_priv->res_idr[i]); 1057 1058 pci_release_regions(pdev); 1059 } 1060 1061 static void vmw_postclose(struct drm_device *dev, 1062 struct drm_file *file_priv) 1063 { 1064 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1065 1066 ttm_object_file_release(&vmw_fp->tfile); 1067 kfree(vmw_fp); 1068 } 1069 1070 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1071 { 1072 struct vmw_private *dev_priv = vmw_priv(dev); 1073 struct vmw_fpriv *vmw_fp; 1074 int ret = -ENOMEM; 1075 1076 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1077 if (unlikely(!vmw_fp)) 1078 return ret; 1079 1080 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1081 if (unlikely(vmw_fp->tfile == NULL)) 1082 goto out_no_tfile; 1083 1084 file_priv->driver_priv = vmw_fp; 1085 1086 return 0; 1087 1088 out_no_tfile: 1089 kfree(vmw_fp); 1090 return ret; 1091 } 1092 1093 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1094 unsigned long arg, 1095 long (*ioctl_func)(struct file *, unsigned int, 1096 unsigned long)) 1097 { 1098 struct drm_file *file_priv = filp->private_data; 1099 struct drm_device *dev = file_priv->minor->dev; 1100 unsigned int nr = DRM_IOCTL_NR(cmd); 1101 unsigned int flags; 1102 1103 /* 1104 * Do extra checking on driver private ioctls. 1105 */ 1106 1107 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1108 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1109 const struct drm_ioctl_desc *ioctl = 1110 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1111 1112 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1113 return ioctl_func(filp, cmd, arg); 1114 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1115 if (!drm_is_current_master(file_priv) && 1116 !capable(CAP_SYS_ADMIN)) 1117 return -EACCES; 1118 } 1119 1120 if (unlikely(ioctl->cmd != cmd)) 1121 goto out_io_encoding; 1122 1123 flags = ioctl->flags; 1124 } else if (!drm_ioctl_flags(nr, &flags)) 1125 return -EINVAL; 1126 1127 return ioctl_func(filp, cmd, arg); 1128 1129 out_io_encoding: 1130 DRM_ERROR("Invalid command format, ioctl %d\n", 1131 nr - DRM_COMMAND_BASE); 1132 1133 return -EINVAL; 1134 } 1135 1136 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1137 unsigned long arg) 1138 { 1139 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1140 } 1141 1142 #ifdef CONFIG_COMPAT 1143 static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1144 unsigned long arg) 1145 { 1146 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 1147 } 1148 #endif 1149 1150 static void vmw_master_set(struct drm_device *dev, 1151 struct drm_file *file_priv, 1152 bool from_open) 1153 { 1154 /* 1155 * Inform a new master that the layout may have changed while 1156 * it was gone. 1157 */ 1158 if (!from_open) 1159 drm_sysfs_hotplug_event(dev); 1160 } 1161 1162 static void vmw_master_drop(struct drm_device *dev, 1163 struct drm_file *file_priv) 1164 { 1165 struct vmw_private *dev_priv = vmw_priv(dev); 1166 1167 vmw_kms_legacy_hotspot_clear(dev_priv); 1168 if (!dev_priv->enable_fb) 1169 vmw_svga_disable(dev_priv); 1170 } 1171 1172 /** 1173 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1174 * 1175 * @dev_priv: Pointer to device private struct. 1176 * Needs the reservation sem to be held in non-exclusive mode. 1177 */ 1178 static void __vmw_svga_enable(struct vmw_private *dev_priv) 1179 { 1180 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1181 1182 if (!ttm_resource_manager_used(man)) { 1183 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); 1184 ttm_resource_manager_set_used(man, true); 1185 } 1186 } 1187 1188 /** 1189 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1190 * 1191 * @dev_priv: Pointer to device private struct. 1192 */ 1193 void vmw_svga_enable(struct vmw_private *dev_priv) 1194 { 1195 (void) ttm_read_lock(&dev_priv->reservation_sem, false); 1196 __vmw_svga_enable(dev_priv); 1197 ttm_read_unlock(&dev_priv->reservation_sem); 1198 } 1199 1200 /** 1201 * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1202 * 1203 * @dev_priv: Pointer to device private struct. 1204 * Needs the reservation sem to be held in exclusive mode. 1205 * Will not empty VRAM. VRAM must be emptied by caller. 1206 */ 1207 static void __vmw_svga_disable(struct vmw_private *dev_priv) 1208 { 1209 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1210 1211 if (ttm_resource_manager_used(man)) { 1212 ttm_resource_manager_set_used(man, false); 1213 vmw_write(dev_priv, SVGA_REG_ENABLE, 1214 SVGA_REG_ENABLE_HIDE | 1215 SVGA_REG_ENABLE_ENABLE); 1216 } 1217 } 1218 1219 /** 1220 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1221 * running. 1222 * 1223 * @dev_priv: Pointer to device private struct. 1224 * Will empty VRAM. 1225 */ 1226 void vmw_svga_disable(struct vmw_private *dev_priv) 1227 { 1228 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1229 /* 1230 * Disabling SVGA will turn off device modesetting capabilities, so 1231 * notify KMS about that so that it doesn't cache atomic state that 1232 * isn't valid anymore, for example crtcs turned on. 1233 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), 1234 * but vmw_kms_lost_device() takes the reservation sem and thus we'll 1235 * end up with lock order reversal. Thus, a master may actually perform 1236 * a new modeset just after we call vmw_kms_lost_device() and race with 1237 * vmw_svga_disable(), but that should at worst cause atomic KMS state 1238 * to be inconsistent with the device, causing modesetting problems. 1239 * 1240 */ 1241 vmw_kms_lost_device(&dev_priv->drm); 1242 ttm_write_lock(&dev_priv->reservation_sem, false); 1243 if (ttm_resource_manager_used(man)) { 1244 if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) 1245 DRM_ERROR("Failed evicting VRAM buffers.\n"); 1246 ttm_resource_manager_set_used(man, false); 1247 vmw_write(dev_priv, SVGA_REG_ENABLE, 1248 SVGA_REG_ENABLE_HIDE | 1249 SVGA_REG_ENABLE_ENABLE); 1250 } 1251 ttm_write_unlock(&dev_priv->reservation_sem); 1252 } 1253 1254 static void vmw_remove(struct pci_dev *pdev) 1255 { 1256 struct drm_device *dev = pci_get_drvdata(pdev); 1257 1258 ttm_mem_global_release(&ttm_mem_glob); 1259 drm_dev_unregister(dev); 1260 vmw_driver_unload(dev); 1261 } 1262 1263 static unsigned long 1264 vmw_get_unmapped_area(struct file *file, unsigned long uaddr, 1265 unsigned long len, unsigned long pgoff, 1266 unsigned long flags) 1267 { 1268 struct drm_file *file_priv = file->private_data; 1269 struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); 1270 1271 return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, 1272 &dev_priv->vma_manager); 1273 } 1274 1275 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1276 void *ptr) 1277 { 1278 struct vmw_private *dev_priv = 1279 container_of(nb, struct vmw_private, pm_nb); 1280 1281 switch (val) { 1282 case PM_HIBERNATION_PREPARE: 1283 /* 1284 * Take the reservation sem in write mode, which will make sure 1285 * there are no other processes holding a buffer object 1286 * reservation, meaning we should be able to evict all buffer 1287 * objects if needed. 1288 * Once user-space processes have been frozen, we can release 1289 * the lock again. 1290 */ 1291 ttm_suspend_lock(&dev_priv->reservation_sem); 1292 dev_priv->suspend_locked = true; 1293 break; 1294 case PM_POST_HIBERNATION: 1295 case PM_POST_RESTORE: 1296 if (READ_ONCE(dev_priv->suspend_locked)) { 1297 dev_priv->suspend_locked = false; 1298 ttm_suspend_unlock(&dev_priv->reservation_sem); 1299 } 1300 break; 1301 default: 1302 break; 1303 } 1304 return 0; 1305 } 1306 1307 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1308 { 1309 struct drm_device *dev = pci_get_drvdata(pdev); 1310 struct vmw_private *dev_priv = vmw_priv(dev); 1311 1312 if (dev_priv->refuse_hibernation) 1313 return -EBUSY; 1314 1315 pci_save_state(pdev); 1316 pci_disable_device(pdev); 1317 pci_set_power_state(pdev, PCI_D3hot); 1318 return 0; 1319 } 1320 1321 static int vmw_pci_resume(struct pci_dev *pdev) 1322 { 1323 pci_set_power_state(pdev, PCI_D0); 1324 pci_restore_state(pdev); 1325 return pci_enable_device(pdev); 1326 } 1327 1328 static int vmw_pm_suspend(struct device *kdev) 1329 { 1330 struct pci_dev *pdev = to_pci_dev(kdev); 1331 struct pm_message dummy; 1332 1333 dummy.event = 0; 1334 1335 return vmw_pci_suspend(pdev, dummy); 1336 } 1337 1338 static int vmw_pm_resume(struct device *kdev) 1339 { 1340 struct pci_dev *pdev = to_pci_dev(kdev); 1341 1342 return vmw_pci_resume(pdev); 1343 } 1344 1345 static int vmw_pm_freeze(struct device *kdev) 1346 { 1347 struct pci_dev *pdev = to_pci_dev(kdev); 1348 struct drm_device *dev = pci_get_drvdata(pdev); 1349 struct vmw_private *dev_priv = vmw_priv(dev); 1350 struct ttm_operation_ctx ctx = { 1351 .interruptible = false, 1352 .no_wait_gpu = false 1353 }; 1354 int ret; 1355 1356 /* 1357 * Unlock for vmw_kms_suspend. 1358 * No user-space processes should be running now. 1359 */ 1360 ttm_suspend_unlock(&dev_priv->reservation_sem); 1361 ret = vmw_kms_suspend(&dev_priv->drm); 1362 if (ret) { 1363 ttm_suspend_lock(&dev_priv->reservation_sem); 1364 DRM_ERROR("Failed to freeze modesetting.\n"); 1365 return ret; 1366 } 1367 if (dev_priv->enable_fb) 1368 vmw_fb_off(dev_priv); 1369 1370 ttm_suspend_lock(&dev_priv->reservation_sem); 1371 vmw_execbuf_release_pinned_bo(dev_priv); 1372 vmw_resource_evict_all(dev_priv); 1373 vmw_release_device_early(dev_priv); 1374 while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0); 1375 if (dev_priv->enable_fb) 1376 vmw_fifo_resource_dec(dev_priv); 1377 if (atomic_read(&dev_priv->num_fifo_resources) != 0) { 1378 DRM_ERROR("Can't hibernate while 3D resources are active.\n"); 1379 if (dev_priv->enable_fb) 1380 vmw_fifo_resource_inc(dev_priv); 1381 WARN_ON(vmw_request_device_late(dev_priv)); 1382 dev_priv->suspend_locked = false; 1383 ttm_suspend_unlock(&dev_priv->reservation_sem); 1384 if (dev_priv->suspend_state) 1385 vmw_kms_resume(dev); 1386 if (dev_priv->enable_fb) 1387 vmw_fb_on(dev_priv); 1388 return -EBUSY; 1389 } 1390 1391 vmw_fence_fifo_down(dev_priv->fman); 1392 __vmw_svga_disable(dev_priv); 1393 1394 vmw_release_device_late(dev_priv); 1395 return 0; 1396 } 1397 1398 static int vmw_pm_restore(struct device *kdev) 1399 { 1400 struct pci_dev *pdev = to_pci_dev(kdev); 1401 struct drm_device *dev = pci_get_drvdata(pdev); 1402 struct vmw_private *dev_priv = vmw_priv(dev); 1403 int ret; 1404 1405 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1406 (void) vmw_read(dev_priv, SVGA_REG_ID); 1407 1408 if (dev_priv->enable_fb) 1409 vmw_fifo_resource_inc(dev_priv); 1410 1411 ret = vmw_request_device(dev_priv); 1412 if (ret) 1413 return ret; 1414 1415 if (dev_priv->enable_fb) 1416 __vmw_svga_enable(dev_priv); 1417 1418 vmw_fence_fifo_up(dev_priv->fman); 1419 dev_priv->suspend_locked = false; 1420 ttm_suspend_unlock(&dev_priv->reservation_sem); 1421 if (dev_priv->suspend_state) 1422 vmw_kms_resume(&dev_priv->drm); 1423 1424 if (dev_priv->enable_fb) 1425 vmw_fb_on(dev_priv); 1426 1427 return 0; 1428 } 1429 1430 static const struct dev_pm_ops vmw_pm_ops = { 1431 .freeze = vmw_pm_freeze, 1432 .thaw = vmw_pm_restore, 1433 .restore = vmw_pm_restore, 1434 .suspend = vmw_pm_suspend, 1435 .resume = vmw_pm_resume, 1436 }; 1437 1438 static const struct file_operations vmwgfx_driver_fops = { 1439 .owner = THIS_MODULE, 1440 .open = drm_open, 1441 .release = drm_release, 1442 .unlocked_ioctl = vmw_unlocked_ioctl, 1443 .mmap = vmw_mmap, 1444 .poll = vmw_fops_poll, 1445 .read = vmw_fops_read, 1446 #if defined(CONFIG_COMPAT) 1447 .compat_ioctl = vmw_compat_ioctl, 1448 #endif 1449 .llseek = noop_llseek, 1450 .get_unmapped_area = vmw_get_unmapped_area, 1451 }; 1452 1453 static const struct drm_driver driver = { 1454 .driver_features = 1455 DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, 1456 .ioctls = vmw_ioctls, 1457 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1458 .master_set = vmw_master_set, 1459 .master_drop = vmw_master_drop, 1460 .open = vmw_driver_open, 1461 .postclose = vmw_postclose, 1462 1463 .dumb_create = vmw_dumb_create, 1464 .dumb_map_offset = vmw_dumb_map_offset, 1465 .dumb_destroy = vmw_dumb_destroy, 1466 1467 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1468 .prime_handle_to_fd = vmw_prime_handle_to_fd, 1469 1470 .fops = &vmwgfx_driver_fops, 1471 .name = VMWGFX_DRIVER_NAME, 1472 .desc = VMWGFX_DRIVER_DESC, 1473 .date = VMWGFX_DRIVER_DATE, 1474 .major = VMWGFX_DRIVER_MAJOR, 1475 .minor = VMWGFX_DRIVER_MINOR, 1476 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL 1477 }; 1478 1479 static struct pci_driver vmw_pci_driver = { 1480 .name = VMWGFX_DRIVER_NAME, 1481 .id_table = vmw_pci_id_list, 1482 .probe = vmw_probe, 1483 .remove = vmw_remove, 1484 .driver = { 1485 .pm = &vmw_pm_ops 1486 } 1487 }; 1488 1489 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1490 { 1491 struct vmw_private *vmw; 1492 int ret; 1493 1494 ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb"); 1495 if (ret) 1496 return ret; 1497 1498 ret = pcim_enable_device(pdev); 1499 if (ret) 1500 return ret; 1501 1502 vmw = devm_drm_dev_alloc(&pdev->dev, &driver, 1503 struct vmw_private, drm); 1504 if (IS_ERR(vmw)) 1505 return PTR_ERR(vmw); 1506 1507 pci_set_drvdata(pdev, &vmw->drm); 1508 1509 ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); 1510 if (ret) 1511 return ret; 1512 1513 ret = vmw_driver_load(vmw, ent->device); 1514 if (ret) 1515 return ret; 1516 1517 ret = drm_dev_register(&vmw->drm, 0); 1518 if (ret) { 1519 vmw_driver_unload(&vmw->drm); 1520 return ret; 1521 } 1522 1523 return 0; 1524 } 1525 1526 static int __init vmwgfx_init(void) 1527 { 1528 int ret; 1529 1530 if (vgacon_text_force()) 1531 return -EINVAL; 1532 1533 ret = pci_register_driver(&vmw_pci_driver); 1534 if (ret) 1535 DRM_ERROR("Failed initializing DRM.\n"); 1536 return ret; 1537 } 1538 1539 static void __exit vmwgfx_exit(void) 1540 { 1541 pci_unregister_driver(&vmw_pci_driver); 1542 } 1543 1544 module_init(vmwgfx_init); 1545 module_exit(vmwgfx_exit); 1546 1547 MODULE_AUTHOR("VMware Inc. and others"); 1548 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); 1549 MODULE_LICENSE("GPL and additional rights"); 1550 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." 1551 __stringify(VMWGFX_DRIVER_MINOR) "." 1552 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 1553 "0"); 1554