1 /************************************************************************** 2 * 3 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA 4 * All Rights Reserved. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 #include <linux/module.h> 28 #include <linux/console.h> 29 30 #include <drm/drmP.h> 31 #include "vmwgfx_drv.h" 32 #include "vmwgfx_binding.h" 33 #include <drm/ttm/ttm_placement.h> 34 #include <drm/ttm/ttm_bo_driver.h> 35 #include <drm/ttm/ttm_object.h> 36 #include <drm/ttm/ttm_module.h> 37 #include <linux/dma_remapping.h> 38 39 #define VMWGFX_DRIVER_NAME "vmwgfx" 40 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 41 #define VMWGFX_CHIP_SVGAII 0 42 #define VMW_FB_RESERVATION 0 43 44 #define VMW_MIN_INITIAL_WIDTH 800 45 #define VMW_MIN_INITIAL_HEIGHT 600 46 47 #ifndef VMWGFX_GIT_VERSION 48 #define VMWGFX_GIT_VERSION "Unknown" 49 #endif 50 51 #define VMWGFX_REPO "In Tree" 52 53 54 /** 55 * Fully encoded drm commands. Might move to vmw_drm.h 56 */ 57 58 #define DRM_IOCTL_VMW_GET_PARAM \ 59 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 60 struct drm_vmw_getparam_arg) 61 #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 62 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 63 union drm_vmw_alloc_dmabuf_arg) 64 #define DRM_IOCTL_VMW_UNREF_DMABUF \ 65 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 66 struct drm_vmw_unref_dmabuf_arg) 67 #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 68 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 69 struct drm_vmw_cursor_bypass_arg) 70 71 #define DRM_IOCTL_VMW_CONTROL_STREAM \ 72 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 73 struct drm_vmw_control_stream_arg) 74 #define DRM_IOCTL_VMW_CLAIM_STREAM \ 75 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 76 struct drm_vmw_stream_arg) 77 #define DRM_IOCTL_VMW_UNREF_STREAM \ 78 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 79 struct drm_vmw_stream_arg) 80 81 #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 82 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 83 struct drm_vmw_context_arg) 84 #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 85 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 86 struct drm_vmw_context_arg) 87 #define DRM_IOCTL_VMW_CREATE_SURFACE \ 88 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 89 union drm_vmw_surface_create_arg) 90 #define DRM_IOCTL_VMW_UNREF_SURFACE \ 91 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 92 struct drm_vmw_surface_arg) 93 #define DRM_IOCTL_VMW_REF_SURFACE \ 94 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 95 union drm_vmw_surface_reference_arg) 96 #define DRM_IOCTL_VMW_EXECBUF \ 97 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 98 struct drm_vmw_execbuf_arg) 99 #define DRM_IOCTL_VMW_GET_3D_CAP \ 100 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 101 struct drm_vmw_get_3d_cap_arg) 102 #define DRM_IOCTL_VMW_FENCE_WAIT \ 103 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 104 struct drm_vmw_fence_wait_arg) 105 #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 106 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 107 struct drm_vmw_fence_signaled_arg) 108 #define DRM_IOCTL_VMW_FENCE_UNREF \ 109 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 110 struct drm_vmw_fence_arg) 111 #define DRM_IOCTL_VMW_FENCE_EVENT \ 112 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 113 struct drm_vmw_fence_event_arg) 114 #define DRM_IOCTL_VMW_PRESENT \ 115 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 116 struct drm_vmw_present_arg) 117 #define DRM_IOCTL_VMW_PRESENT_READBACK \ 118 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 119 struct drm_vmw_present_readback_arg) 120 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 121 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 122 struct drm_vmw_update_layout_arg) 123 #define DRM_IOCTL_VMW_CREATE_SHADER \ 124 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 125 struct drm_vmw_shader_create_arg) 126 #define DRM_IOCTL_VMW_UNREF_SHADER \ 127 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 128 struct drm_vmw_shader_arg) 129 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 130 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 131 union drm_vmw_gb_surface_create_arg) 132 #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 133 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 134 union drm_vmw_gb_surface_reference_arg) 135 #define DRM_IOCTL_VMW_SYNCCPU \ 136 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 137 struct drm_vmw_synccpu_arg) 138 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 139 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 140 struct drm_vmw_context_arg) 141 142 /** 143 * The core DRM version of this macro doesn't account for 144 * DRM_COMMAND_BASE. 145 */ 146 147 #define VMW_IOCTL_DEF(ioctl, func, flags) \ 148 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} 149 150 /** 151 * Ioctl definitions. 152 */ 153 154 static const struct drm_ioctl_desc vmw_ioctls[] = { 155 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 156 DRM_AUTH | DRM_RENDER_ALLOW), 157 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, 158 DRM_AUTH | DRM_RENDER_ALLOW), 159 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, 160 DRM_RENDER_ALLOW), 161 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, 162 vmw_kms_cursor_bypass_ioctl, 163 DRM_MASTER | DRM_CONTROL_ALLOW), 164 165 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 166 DRM_MASTER | DRM_CONTROL_ALLOW), 167 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 168 DRM_MASTER | DRM_CONTROL_ALLOW), 169 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 170 DRM_MASTER | DRM_CONTROL_ALLOW), 171 172 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 173 DRM_AUTH | DRM_RENDER_ALLOW), 174 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 175 DRM_RENDER_ALLOW), 176 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 177 DRM_AUTH | DRM_RENDER_ALLOW), 178 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 179 DRM_RENDER_ALLOW), 180 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 181 DRM_AUTH | DRM_RENDER_ALLOW), 182 VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | 183 DRM_RENDER_ALLOW), 184 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 185 DRM_RENDER_ALLOW), 186 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 187 vmw_fence_obj_signaled_ioctl, 188 DRM_RENDER_ALLOW), 189 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 190 DRM_RENDER_ALLOW), 191 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 192 DRM_AUTH | DRM_RENDER_ALLOW), 193 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 194 DRM_AUTH | DRM_RENDER_ALLOW), 195 196 /* these allow direct access to the framebuffers mark as master only */ 197 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, 198 DRM_MASTER | DRM_AUTH), 199 VMW_IOCTL_DEF(VMW_PRESENT_READBACK, 200 vmw_present_readback_ioctl, 201 DRM_MASTER | DRM_AUTH), 202 /* 203 * The permissions of the below ioctl are overridden in 204 * vmw_generic_ioctl(). We require either 205 * DRM_MASTER or capable(CAP_SYS_ADMIN). 206 */ 207 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, 208 vmw_kms_update_layout_ioctl, 209 DRM_RENDER_ALLOW), 210 VMW_IOCTL_DEF(VMW_CREATE_SHADER, 211 vmw_shader_define_ioctl, 212 DRM_AUTH | DRM_RENDER_ALLOW), 213 VMW_IOCTL_DEF(VMW_UNREF_SHADER, 214 vmw_shader_destroy_ioctl, 215 DRM_RENDER_ALLOW), 216 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, 217 vmw_gb_surface_define_ioctl, 218 DRM_AUTH | DRM_RENDER_ALLOW), 219 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, 220 vmw_gb_surface_reference_ioctl, 221 DRM_AUTH | DRM_RENDER_ALLOW), 222 VMW_IOCTL_DEF(VMW_SYNCCPU, 223 vmw_user_dmabuf_synccpu_ioctl, 224 DRM_RENDER_ALLOW), 225 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, 226 vmw_extended_context_define_ioctl, 227 DRM_AUTH | DRM_RENDER_ALLOW), 228 }; 229 230 static struct pci_device_id vmw_pci_id_list[] = { 231 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, 232 {0, 0, 0} 233 }; 234 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 235 236 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 237 static int vmw_force_iommu; 238 static int vmw_restrict_iommu; 239 static int vmw_force_coherent; 240 static int vmw_restrict_dma_mask; 241 static int vmw_assume_16bpp; 242 243 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 244 static void vmw_master_init(struct vmw_master *); 245 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 246 void *ptr); 247 248 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 249 module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR); 250 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); 251 module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR); 252 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 253 module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR); 254 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 255 module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR); 256 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 257 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR); 258 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 259 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 260 261 262 static void vmw_print_capabilities(uint32_t capabilities) 263 { 264 DRM_INFO("Capabilities:\n"); 265 if (capabilities & SVGA_CAP_RECT_COPY) 266 DRM_INFO(" Rect copy.\n"); 267 if (capabilities & SVGA_CAP_CURSOR) 268 DRM_INFO(" Cursor.\n"); 269 if (capabilities & SVGA_CAP_CURSOR_BYPASS) 270 DRM_INFO(" Cursor bypass.\n"); 271 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) 272 DRM_INFO(" Cursor bypass 2.\n"); 273 if (capabilities & SVGA_CAP_8BIT_EMULATION) 274 DRM_INFO(" 8bit emulation.\n"); 275 if (capabilities & SVGA_CAP_ALPHA_CURSOR) 276 DRM_INFO(" Alpha cursor.\n"); 277 if (capabilities & SVGA_CAP_3D) 278 DRM_INFO(" 3D.\n"); 279 if (capabilities & SVGA_CAP_EXTENDED_FIFO) 280 DRM_INFO(" Extended Fifo.\n"); 281 if (capabilities & SVGA_CAP_MULTIMON) 282 DRM_INFO(" Multimon.\n"); 283 if (capabilities & SVGA_CAP_PITCHLOCK) 284 DRM_INFO(" Pitchlock.\n"); 285 if (capabilities & SVGA_CAP_IRQMASK) 286 DRM_INFO(" Irq mask.\n"); 287 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) 288 DRM_INFO(" Display Topology.\n"); 289 if (capabilities & SVGA_CAP_GMR) 290 DRM_INFO(" GMR.\n"); 291 if (capabilities & SVGA_CAP_TRACES) 292 DRM_INFO(" Traces.\n"); 293 if (capabilities & SVGA_CAP_GMR2) 294 DRM_INFO(" GMR2.\n"); 295 if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) 296 DRM_INFO(" Screen Object 2.\n"); 297 if (capabilities & SVGA_CAP_COMMAND_BUFFERS) 298 DRM_INFO(" Command Buffers.\n"); 299 if (capabilities & SVGA_CAP_CMD_BUFFERS_2) 300 DRM_INFO(" Command Buffers 2.\n"); 301 if (capabilities & SVGA_CAP_GBOBJECTS) 302 DRM_INFO(" Guest Backed Resources.\n"); 303 if (capabilities & SVGA_CAP_DX) 304 DRM_INFO(" DX Features.\n"); 305 } 306 307 /** 308 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result 309 * 310 * @dev_priv: A device private structure. 311 * 312 * This function creates a small buffer object that holds the query 313 * result for dummy queries emitted as query barriers. 314 * The function will then map the first page and initialize a pending 315 * occlusion query result structure, Finally it will unmap the buffer. 316 * No interruptible waits are done within this function. 317 * 318 * Returns an error if bo creation or initialization fails. 319 */ 320 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 321 { 322 int ret; 323 struct vmw_dma_buffer *vbo; 324 struct ttm_bo_kmap_obj map; 325 volatile SVGA3dQueryResult *result; 326 bool dummy; 327 328 /* 329 * Create the vbo as pinned, so that a tryreserve will 330 * immediately succeed. This is because we're the only 331 * user of the bo currently. 332 */ 333 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); 334 if (!vbo) 335 return -ENOMEM; 336 337 ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE, 338 &vmw_sys_ne_placement, false, 339 &vmw_dmabuf_bo_free); 340 if (unlikely(ret != 0)) 341 return ret; 342 343 ret = ttm_bo_reserve(&vbo->base, false, true, NULL); 344 BUG_ON(ret != 0); 345 vmw_bo_pin_reserved(vbo, true); 346 347 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); 348 if (likely(ret == 0)) { 349 result = ttm_kmap_obj_virtual(&map, &dummy); 350 result->totalSize = sizeof(*result); 351 result->state = SVGA3D_QUERYSTATE_PENDING; 352 result->result32 = 0xff; 353 ttm_bo_kunmap(&map); 354 } 355 vmw_bo_pin_reserved(vbo, false); 356 ttm_bo_unreserve(&vbo->base); 357 358 if (unlikely(ret != 0)) { 359 DRM_ERROR("Dummy query buffer map failed.\n"); 360 vmw_dmabuf_unreference(&vbo); 361 } else 362 dev_priv->dummy_query_bo = vbo; 363 364 return ret; 365 } 366 367 /** 368 * vmw_request_device_late - Perform late device setup 369 * 370 * @dev_priv: Pointer to device private. 371 * 372 * This function performs setup of otables and enables large command 373 * buffer submission. These tasks are split out to a separate function 374 * because it reverts vmw_release_device_early and is intended to be used 375 * by an error path in the hibernation code. 376 */ 377 static int vmw_request_device_late(struct vmw_private *dev_priv) 378 { 379 int ret; 380 381 if (dev_priv->has_mob) { 382 ret = vmw_otables_setup(dev_priv); 383 if (unlikely(ret != 0)) { 384 DRM_ERROR("Unable to initialize " 385 "guest Memory OBjects.\n"); 386 return ret; 387 } 388 } 389 390 if (dev_priv->cman) { 391 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 392 256*4096, 2*4096); 393 if (ret) { 394 struct vmw_cmdbuf_man *man = dev_priv->cman; 395 396 dev_priv->cman = NULL; 397 vmw_cmdbuf_man_destroy(man); 398 } 399 } 400 401 return 0; 402 } 403 404 static int vmw_request_device(struct vmw_private *dev_priv) 405 { 406 int ret; 407 408 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 409 if (unlikely(ret != 0)) { 410 DRM_ERROR("Unable to initialize FIFO.\n"); 411 return ret; 412 } 413 vmw_fence_fifo_up(dev_priv->fman); 414 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 415 if (IS_ERR(dev_priv->cman)) { 416 dev_priv->cman = NULL; 417 dev_priv->has_dx = false; 418 } 419 420 ret = vmw_request_device_late(dev_priv); 421 if (ret) 422 goto out_no_mob; 423 424 ret = vmw_dummy_query_bo_create(dev_priv); 425 if (unlikely(ret != 0)) 426 goto out_no_query_bo; 427 428 return 0; 429 430 out_no_query_bo: 431 if (dev_priv->cman) 432 vmw_cmdbuf_remove_pool(dev_priv->cman); 433 if (dev_priv->has_mob) { 434 (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); 435 vmw_otables_takedown(dev_priv); 436 } 437 if (dev_priv->cman) 438 vmw_cmdbuf_man_destroy(dev_priv->cman); 439 out_no_mob: 440 vmw_fence_fifo_down(dev_priv->fman); 441 vmw_fifo_release(dev_priv, &dev_priv->fifo); 442 return ret; 443 } 444 445 /** 446 * vmw_release_device_early - Early part of fifo takedown. 447 * 448 * @dev_priv: Pointer to device private struct. 449 * 450 * This is the first part of command submission takedown, to be called before 451 * buffer management is taken down. 452 */ 453 static void vmw_release_device_early(struct vmw_private *dev_priv) 454 { 455 /* 456 * Previous destructions should've released 457 * the pinned bo. 458 */ 459 460 BUG_ON(dev_priv->pinned_bo != NULL); 461 462 vmw_dmabuf_unreference(&dev_priv->dummy_query_bo); 463 if (dev_priv->cman) 464 vmw_cmdbuf_remove_pool(dev_priv->cman); 465 466 if (dev_priv->has_mob) { 467 ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); 468 vmw_otables_takedown(dev_priv); 469 } 470 } 471 472 /** 473 * vmw_release_device_late - Late part of fifo takedown. 474 * 475 * @dev_priv: Pointer to device private struct. 476 * 477 * This is the last part of the command submission takedown, to be called when 478 * command submission is no longer needed. It may wait on pending fences. 479 */ 480 static void vmw_release_device_late(struct vmw_private *dev_priv) 481 { 482 vmw_fence_fifo_down(dev_priv->fman); 483 if (dev_priv->cman) 484 vmw_cmdbuf_man_destroy(dev_priv->cman); 485 486 vmw_fifo_release(dev_priv, &dev_priv->fifo); 487 } 488 489 /** 490 * Sets the initial_[width|height] fields on the given vmw_private. 491 * 492 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then 493 * clamping the value to fb_max_[width|height] fields and the 494 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 495 * If the values appear to be invalid, set them to 496 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 497 */ 498 static void vmw_get_initial_size(struct vmw_private *dev_priv) 499 { 500 uint32_t width; 501 uint32_t height; 502 503 width = vmw_read(dev_priv, SVGA_REG_WIDTH); 504 height = vmw_read(dev_priv, SVGA_REG_HEIGHT); 505 506 width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); 507 height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); 508 509 if (width > dev_priv->fb_max_width || 510 height > dev_priv->fb_max_height) { 511 512 /* 513 * This is a host error and shouldn't occur. 514 */ 515 516 width = VMW_MIN_INITIAL_WIDTH; 517 height = VMW_MIN_INITIAL_HEIGHT; 518 } 519 520 dev_priv->initial_width = width; 521 dev_priv->initial_height = height; 522 } 523 524 /** 525 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 526 * system. 527 * 528 * @dev_priv: Pointer to a struct vmw_private 529 * 530 * This functions tries to determine the IOMMU setup and what actions 531 * need to be taken by the driver to make system pages visible to the 532 * device. 533 * If this function decides that DMA is not possible, it returns -EINVAL. 534 * The driver may then try to disable features of the device that require 535 * DMA. 536 */ 537 static int vmw_dma_select_mode(struct vmw_private *dev_priv) 538 { 539 static const char *names[vmw_dma_map_max] = { 540 [vmw_dma_phys] = "Using physical TTM page addresses.", 541 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 542 [vmw_dma_map_populate] = "Keeping DMA mappings.", 543 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 544 #ifdef CONFIG_X86 545 const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); 546 547 #ifdef CONFIG_INTEL_IOMMU 548 if (intel_iommu_enabled) { 549 dev_priv->map_mode = vmw_dma_map_populate; 550 goto out_fixup; 551 } 552 #endif 553 554 if (!(vmw_force_iommu || vmw_force_coherent)) { 555 dev_priv->map_mode = vmw_dma_phys; 556 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 557 return 0; 558 } 559 560 dev_priv->map_mode = vmw_dma_map_populate; 561 562 if (dma_ops->sync_single_for_cpu) 563 dev_priv->map_mode = vmw_dma_alloc_coherent; 564 #ifdef CONFIG_SWIOTLB 565 if (swiotlb_nr_tbl() == 0) 566 dev_priv->map_mode = vmw_dma_map_populate; 567 #endif 568 569 #ifdef CONFIG_INTEL_IOMMU 570 out_fixup: 571 #endif 572 if (dev_priv->map_mode == vmw_dma_map_populate && 573 vmw_restrict_iommu) 574 dev_priv->map_mode = vmw_dma_map_bind; 575 576 if (vmw_force_coherent) 577 dev_priv->map_mode = vmw_dma_alloc_coherent; 578 579 #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) 580 /* 581 * No coherent page pool 582 */ 583 if (dev_priv->map_mode == vmw_dma_alloc_coherent) 584 return -EINVAL; 585 #endif 586 587 #else /* CONFIG_X86 */ 588 dev_priv->map_mode = vmw_dma_map_populate; 589 #endif /* CONFIG_X86 */ 590 591 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 592 593 return 0; 594 } 595 596 /** 597 * vmw_dma_masks - set required page- and dma masks 598 * 599 * @dev: Pointer to struct drm-device 600 * 601 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 602 * restriction also for 64-bit systems. 603 */ 604 #ifdef CONFIG_INTEL_IOMMU 605 static int vmw_dma_masks(struct vmw_private *dev_priv) 606 { 607 struct drm_device *dev = dev_priv->dev; 608 609 if (intel_iommu_enabled && 610 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 611 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 612 return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 613 } 614 return 0; 615 } 616 #else 617 static int vmw_dma_masks(struct vmw_private *dev_priv) 618 { 619 return 0; 620 } 621 #endif 622 623 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 624 { 625 struct vmw_private *dev_priv; 626 int ret; 627 uint32_t svga_id; 628 enum vmw_res_type i; 629 bool refuse_dma = false; 630 char host_log[100] = {0}; 631 632 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 633 if (unlikely(dev_priv == NULL)) { 634 DRM_ERROR("Failed allocating a device private struct.\n"); 635 return -ENOMEM; 636 } 637 638 pci_set_master(dev->pdev); 639 640 dev_priv->dev = dev; 641 dev_priv->vmw_chipset = chipset; 642 dev_priv->last_read_seqno = (uint32_t) -100; 643 mutex_init(&dev_priv->cmdbuf_mutex); 644 mutex_init(&dev_priv->release_mutex); 645 mutex_init(&dev_priv->binding_mutex); 646 mutex_init(&dev_priv->global_kms_state_mutex); 647 rwlock_init(&dev_priv->resource_lock); 648 ttm_lock_init(&dev_priv->reservation_sem); 649 spin_lock_init(&dev_priv->hw_lock); 650 spin_lock_init(&dev_priv->waiter_lock); 651 spin_lock_init(&dev_priv->cap_lock); 652 spin_lock_init(&dev_priv->svga_lock); 653 654 for (i = vmw_res_context; i < vmw_res_max; ++i) { 655 idr_init(&dev_priv->res_idr[i]); 656 INIT_LIST_HEAD(&dev_priv->res_lru[i]); 657 } 658 659 mutex_init(&dev_priv->init_mutex); 660 init_waitqueue_head(&dev_priv->fence_queue); 661 init_waitqueue_head(&dev_priv->fifo_queue); 662 dev_priv->fence_queue_waiters = 0; 663 dev_priv->fifo_queue_waiters = 0; 664 665 dev_priv->used_memory_size = 0; 666 667 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 668 dev_priv->vram_start = pci_resource_start(dev->pdev, 1); 669 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); 670 671 dev_priv->assume_16bpp = !!vmw_assume_16bpp; 672 673 dev_priv->enable_fb = enable_fbdev; 674 675 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 676 svga_id = vmw_read(dev_priv, SVGA_REG_ID); 677 if (svga_id != SVGA_ID_2) { 678 ret = -ENOSYS; 679 DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); 680 goto out_err0; 681 } 682 683 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 684 ret = vmw_dma_select_mode(dev_priv); 685 if (unlikely(ret != 0)) { 686 DRM_INFO("Restricting capabilities due to IOMMU setup.\n"); 687 refuse_dma = true; 688 } 689 690 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 691 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 692 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 693 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 694 695 vmw_get_initial_size(dev_priv); 696 697 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 698 dev_priv->max_gmr_ids = 699 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 700 dev_priv->max_gmr_pages = 701 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 702 dev_priv->memory_size = 703 vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 704 dev_priv->memory_size -= dev_priv->vram_size; 705 } else { 706 /* 707 * An arbitrary limit of 512MiB on surface 708 * memory. But all HWV8 hardware supports GMR2. 709 */ 710 dev_priv->memory_size = 512*1024*1024; 711 } 712 dev_priv->max_mob_pages = 0; 713 dev_priv->max_mob_size = 0; 714 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 715 uint64_t mem_size = 716 vmw_read(dev_priv, 717 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 718 719 /* 720 * Workaround for low memory 2D VMs to compensate for the 721 * allocation taken by fbdev 722 */ 723 if (!(dev_priv->capabilities & SVGA_CAP_3D)) 724 mem_size *= 2; 725 726 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 727 dev_priv->prim_bb_mem = 728 vmw_read(dev_priv, 729 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 730 dev_priv->max_mob_size = 731 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 732 dev_priv->stdu_max_width = 733 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 734 dev_priv->stdu_max_height = 735 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 736 737 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 738 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 739 dev_priv->texture_max_width = vmw_read(dev_priv, 740 SVGA_REG_DEV_CAP); 741 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 742 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 743 dev_priv->texture_max_height = vmw_read(dev_priv, 744 SVGA_REG_DEV_CAP); 745 } else { 746 dev_priv->texture_max_width = 8192; 747 dev_priv->texture_max_height = 8192; 748 dev_priv->prim_bb_mem = dev_priv->vram_size; 749 } 750 751 vmw_print_capabilities(dev_priv->capabilities); 752 753 ret = vmw_dma_masks(dev_priv); 754 if (unlikely(ret != 0)) 755 goto out_err0; 756 757 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 758 DRM_INFO("Max GMR ids is %u\n", 759 (unsigned)dev_priv->max_gmr_ids); 760 DRM_INFO("Max number of GMR pages is %u\n", 761 (unsigned)dev_priv->max_gmr_pages); 762 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", 763 (unsigned)dev_priv->memory_size / 1024); 764 } 765 DRM_INFO("Maximum display memory size is %u kiB\n", 766 dev_priv->prim_bb_mem / 1024); 767 DRM_INFO("VRAM at 0x%08x size is %u kiB\n", 768 dev_priv->vram_start, dev_priv->vram_size / 1024); 769 DRM_INFO("MMIO at 0x%08x size is %u kiB\n", 770 dev_priv->mmio_start, dev_priv->mmio_size / 1024); 771 772 ret = vmw_ttm_global_init(dev_priv); 773 if (unlikely(ret != 0)) 774 goto out_err0; 775 776 777 vmw_master_init(&dev_priv->fbdev_master); 778 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 779 dev_priv->active_master = &dev_priv->fbdev_master; 780 781 dev_priv->mmio_virt = memremap(dev_priv->mmio_start, 782 dev_priv->mmio_size, MEMREMAP_WB); 783 784 if (unlikely(dev_priv->mmio_virt == NULL)) { 785 ret = -ENOMEM; 786 DRM_ERROR("Failed mapping MMIO.\n"); 787 goto out_err3; 788 } 789 790 /* Need mmio memory to check for fifo pitchlock cap. */ 791 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 792 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 793 !vmw_fifo_have_pitchlock(dev_priv)) { 794 ret = -ENOSYS; 795 DRM_ERROR("Hardware has no pitchlock\n"); 796 goto out_err4; 797 } 798 799 dev_priv->tdev = ttm_object_device_init 800 (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); 801 802 if (unlikely(dev_priv->tdev == NULL)) { 803 DRM_ERROR("Unable to initialize TTM object management.\n"); 804 ret = -ENOMEM; 805 goto out_err4; 806 } 807 808 dev->dev_private = dev_priv; 809 810 ret = pci_request_regions(dev->pdev, "vmwgfx probe"); 811 dev_priv->stealth = (ret != 0); 812 if (dev_priv->stealth) { 813 /** 814 * Request at least the mmio PCI resource. 815 */ 816 817 DRM_INFO("It appears like vesafb is loaded. " 818 "Ignore above error if any.\n"); 819 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); 820 if (unlikely(ret != 0)) { 821 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); 822 goto out_no_device; 823 } 824 } 825 826 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 827 ret = drm_irq_install(dev, dev->pdev->irq); 828 if (ret != 0) { 829 DRM_ERROR("Failed installing irq: %d\n", ret); 830 goto out_no_irq; 831 } 832 } 833 834 dev_priv->fman = vmw_fence_manager_init(dev_priv); 835 if (unlikely(dev_priv->fman == NULL)) { 836 ret = -ENOMEM; 837 goto out_no_fman; 838 } 839 840 ret = ttm_bo_device_init(&dev_priv->bdev, 841 dev_priv->bo_global_ref.ref.object, 842 &vmw_bo_driver, 843 dev->anon_inode->i_mapping, 844 VMWGFX_FILE_PAGE_OFFSET, 845 false); 846 if (unlikely(ret != 0)) { 847 DRM_ERROR("Failed initializing TTM buffer object driver.\n"); 848 goto out_no_bdev; 849 } 850 851 /* 852 * Enable VRAM, but initially don't use it until SVGA is enabled and 853 * unhidden. 854 */ 855 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 856 (dev_priv->vram_size >> PAGE_SHIFT)); 857 if (unlikely(ret != 0)) { 858 DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 859 goto out_no_vram; 860 } 861 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 862 863 dev_priv->has_gmr = true; 864 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 865 refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, 866 VMW_PL_GMR) != 0) { 867 DRM_INFO("No GMR memory available. " 868 "Graphics memory resources are very limited.\n"); 869 dev_priv->has_gmr = false; 870 } 871 872 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 873 dev_priv->has_mob = true; 874 if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, 875 VMW_PL_MOB) != 0) { 876 DRM_INFO("No MOB memory available. " 877 "3D will be disabled.\n"); 878 dev_priv->has_mob = false; 879 } 880 } 881 882 if (dev_priv->has_mob) { 883 spin_lock(&dev_priv->cap_lock); 884 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); 885 dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP); 886 spin_unlock(&dev_priv->cap_lock); 887 } 888 889 890 ret = vmw_kms_init(dev_priv); 891 if (unlikely(ret != 0)) 892 goto out_no_kms; 893 vmw_overlay_init(dev_priv); 894 895 ret = vmw_request_device(dev_priv); 896 if (ret) 897 goto out_no_fifo; 898 899 DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); 900 901 snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", 902 VMWGFX_REPO, VMWGFX_GIT_VERSION); 903 vmw_host_log(host_log); 904 905 memset(host_log, 0, sizeof(host_log)); 906 snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", 907 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 908 VMWGFX_DRIVER_PATCHLEVEL); 909 vmw_host_log(host_log); 910 911 if (dev_priv->enable_fb) { 912 vmw_fifo_resource_inc(dev_priv); 913 vmw_svga_enable(dev_priv); 914 vmw_fb_init(dev_priv); 915 } 916 917 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 918 register_pm_notifier(&dev_priv->pm_nb); 919 920 return 0; 921 922 out_no_fifo: 923 vmw_overlay_close(dev_priv); 924 vmw_kms_close(dev_priv); 925 out_no_kms: 926 if (dev_priv->has_mob) 927 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 928 if (dev_priv->has_gmr) 929 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 930 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 931 out_no_vram: 932 (void)ttm_bo_device_release(&dev_priv->bdev); 933 out_no_bdev: 934 vmw_fence_manager_takedown(dev_priv->fman); 935 out_no_fman: 936 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 937 drm_irq_uninstall(dev_priv->dev); 938 out_no_irq: 939 if (dev_priv->stealth) 940 pci_release_region(dev->pdev, 2); 941 else 942 pci_release_regions(dev->pdev); 943 out_no_device: 944 ttm_object_device_release(&dev_priv->tdev); 945 out_err4: 946 memunmap(dev_priv->mmio_virt); 947 out_err3: 948 vmw_ttm_global_release(dev_priv); 949 out_err0: 950 for (i = vmw_res_context; i < vmw_res_max; ++i) 951 idr_destroy(&dev_priv->res_idr[i]); 952 953 if (dev_priv->ctx.staged_bindings) 954 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 955 kfree(dev_priv); 956 return ret; 957 } 958 959 static void vmw_driver_unload(struct drm_device *dev) 960 { 961 struct vmw_private *dev_priv = vmw_priv(dev); 962 enum vmw_res_type i; 963 964 unregister_pm_notifier(&dev_priv->pm_nb); 965 966 if (dev_priv->ctx.res_ht_initialized) 967 drm_ht_remove(&dev_priv->ctx.res_ht); 968 vfree(dev_priv->ctx.cmd_bounce); 969 if (dev_priv->enable_fb) { 970 vmw_fb_off(dev_priv); 971 vmw_fb_close(dev_priv); 972 vmw_fifo_resource_dec(dev_priv); 973 vmw_svga_disable(dev_priv); 974 } 975 976 vmw_kms_close(dev_priv); 977 vmw_overlay_close(dev_priv); 978 979 if (dev_priv->has_gmr) 980 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); 981 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 982 983 vmw_release_device_early(dev_priv); 984 if (dev_priv->has_mob) 985 (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); 986 (void) ttm_bo_device_release(&dev_priv->bdev); 987 vmw_release_device_late(dev_priv); 988 vmw_fence_manager_takedown(dev_priv->fman); 989 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 990 drm_irq_uninstall(dev_priv->dev); 991 if (dev_priv->stealth) 992 pci_release_region(dev->pdev, 2); 993 else 994 pci_release_regions(dev->pdev); 995 996 ttm_object_device_release(&dev_priv->tdev); 997 memunmap(dev_priv->mmio_virt); 998 if (dev_priv->ctx.staged_bindings) 999 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1000 vmw_ttm_global_release(dev_priv); 1001 1002 for (i = vmw_res_context; i < vmw_res_max; ++i) 1003 idr_destroy(&dev_priv->res_idr[i]); 1004 1005 kfree(dev_priv); 1006 } 1007 1008 static void vmw_postclose(struct drm_device *dev, 1009 struct drm_file *file_priv) 1010 { 1011 struct vmw_fpriv *vmw_fp; 1012 1013 vmw_fp = vmw_fpriv(file_priv); 1014 1015 if (vmw_fp->locked_master) { 1016 struct vmw_master *vmaster = 1017 vmw_master(vmw_fp->locked_master); 1018 1019 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 1020 ttm_vt_unlock(&vmaster->lock); 1021 drm_master_put(&vmw_fp->locked_master); 1022 } 1023 1024 ttm_object_file_release(&vmw_fp->tfile); 1025 kfree(vmw_fp); 1026 } 1027 1028 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1029 { 1030 struct vmw_private *dev_priv = vmw_priv(dev); 1031 struct vmw_fpriv *vmw_fp; 1032 int ret = -ENOMEM; 1033 1034 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1035 if (unlikely(vmw_fp == NULL)) 1036 return ret; 1037 1038 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1039 if (unlikely(vmw_fp->tfile == NULL)) 1040 goto out_no_tfile; 1041 1042 file_priv->driver_priv = vmw_fp; 1043 1044 return 0; 1045 1046 out_no_tfile: 1047 kfree(vmw_fp); 1048 return ret; 1049 } 1050 1051 static struct vmw_master *vmw_master_check(struct drm_device *dev, 1052 struct drm_file *file_priv, 1053 unsigned int flags) 1054 { 1055 int ret; 1056 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1057 struct vmw_master *vmaster; 1058 1059 if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) 1060 return NULL; 1061 1062 ret = mutex_lock_interruptible(&dev->master_mutex); 1063 if (unlikely(ret != 0)) 1064 return ERR_PTR(-ERESTARTSYS); 1065 1066 if (drm_is_current_master(file_priv)) { 1067 mutex_unlock(&dev->master_mutex); 1068 return NULL; 1069 } 1070 1071 /* 1072 * Check if we were previously master, but now dropped. In that 1073 * case, allow at least render node functionality. 1074 */ 1075 if (vmw_fp->locked_master) { 1076 mutex_unlock(&dev->master_mutex); 1077 1078 if (flags & DRM_RENDER_ALLOW) 1079 return NULL; 1080 1081 DRM_ERROR("Dropped master trying to access ioctl that " 1082 "requires authentication.\n"); 1083 return ERR_PTR(-EACCES); 1084 } 1085 mutex_unlock(&dev->master_mutex); 1086 1087 /* 1088 * Take the TTM lock. Possibly sleep waiting for the authenticating 1089 * master to become master again, or for a SIGTERM if the 1090 * authenticating master exits. 1091 */ 1092 vmaster = vmw_master(file_priv->master); 1093 ret = ttm_read_lock(&vmaster->lock, true); 1094 if (unlikely(ret != 0)) 1095 vmaster = ERR_PTR(ret); 1096 1097 return vmaster; 1098 } 1099 1100 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1101 unsigned long arg, 1102 long (*ioctl_func)(struct file *, unsigned int, 1103 unsigned long)) 1104 { 1105 struct drm_file *file_priv = filp->private_data; 1106 struct drm_device *dev = file_priv->minor->dev; 1107 unsigned int nr = DRM_IOCTL_NR(cmd); 1108 struct vmw_master *vmaster; 1109 unsigned int flags; 1110 long ret; 1111 1112 /* 1113 * Do extra checking on driver private ioctls. 1114 */ 1115 1116 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1117 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1118 const struct drm_ioctl_desc *ioctl = 1119 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1120 1121 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1122 ret = (long) drm_ioctl_permit(ioctl->flags, file_priv); 1123 if (unlikely(ret != 0)) 1124 return ret; 1125 1126 if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)) 1127 goto out_io_encoding; 1128 1129 return (long) vmw_execbuf_ioctl(dev, arg, file_priv, 1130 _IOC_SIZE(cmd)); 1131 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1132 if (!drm_is_current_master(file_priv) && 1133 !capable(CAP_SYS_ADMIN)) 1134 return -EACCES; 1135 } 1136 1137 if (unlikely(ioctl->cmd != cmd)) 1138 goto out_io_encoding; 1139 1140 flags = ioctl->flags; 1141 } else if (!drm_ioctl_flags(nr, &flags)) 1142 return -EINVAL; 1143 1144 vmaster = vmw_master_check(dev, file_priv, flags); 1145 if (IS_ERR(vmaster)) { 1146 ret = PTR_ERR(vmaster); 1147 1148 if (ret != -ERESTARTSYS) 1149 DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", 1150 nr, ret); 1151 return ret; 1152 } 1153 1154 ret = ioctl_func(filp, cmd, arg); 1155 if (vmaster) 1156 ttm_read_unlock(&vmaster->lock); 1157 1158 return ret; 1159 1160 out_io_encoding: 1161 DRM_ERROR("Invalid command format, ioctl %d\n", 1162 nr - DRM_COMMAND_BASE); 1163 1164 return -EINVAL; 1165 } 1166 1167 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1168 unsigned long arg) 1169 { 1170 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1171 } 1172 1173 #ifdef CONFIG_COMPAT 1174 static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1175 unsigned long arg) 1176 { 1177 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 1178 } 1179 #endif 1180 1181 static void vmw_lastclose(struct drm_device *dev) 1182 { 1183 } 1184 1185 static void vmw_master_init(struct vmw_master *vmaster) 1186 { 1187 ttm_lock_init(&vmaster->lock); 1188 } 1189 1190 static int vmw_master_create(struct drm_device *dev, 1191 struct drm_master *master) 1192 { 1193 struct vmw_master *vmaster; 1194 1195 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); 1196 if (unlikely(vmaster == NULL)) 1197 return -ENOMEM; 1198 1199 vmw_master_init(vmaster); 1200 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); 1201 master->driver_priv = vmaster; 1202 1203 return 0; 1204 } 1205 1206 static void vmw_master_destroy(struct drm_device *dev, 1207 struct drm_master *master) 1208 { 1209 struct vmw_master *vmaster = vmw_master(master); 1210 1211 master->driver_priv = NULL; 1212 kfree(vmaster); 1213 } 1214 1215 static int vmw_master_set(struct drm_device *dev, 1216 struct drm_file *file_priv, 1217 bool from_open) 1218 { 1219 struct vmw_private *dev_priv = vmw_priv(dev); 1220 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1221 struct vmw_master *active = dev_priv->active_master; 1222 struct vmw_master *vmaster = vmw_master(file_priv->master); 1223 int ret = 0; 1224 1225 if (active) { 1226 BUG_ON(active != &dev_priv->fbdev_master); 1227 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); 1228 if (unlikely(ret != 0)) 1229 return ret; 1230 1231 ttm_lock_set_kill(&active->lock, true, SIGTERM); 1232 dev_priv->active_master = NULL; 1233 } 1234 1235 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1236 if (!from_open) { 1237 ttm_vt_unlock(&vmaster->lock); 1238 BUG_ON(vmw_fp->locked_master != file_priv->master); 1239 drm_master_put(&vmw_fp->locked_master); 1240 } 1241 1242 dev_priv->active_master = vmaster; 1243 drm_sysfs_hotplug_event(dev); 1244 1245 return 0; 1246 } 1247 1248 static void vmw_master_drop(struct drm_device *dev, 1249 struct drm_file *file_priv) 1250 { 1251 struct vmw_private *dev_priv = vmw_priv(dev); 1252 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1253 struct vmw_master *vmaster = vmw_master(file_priv->master); 1254 int ret; 1255 1256 /** 1257 * Make sure the master doesn't disappear while we have 1258 * it locked. 1259 */ 1260 1261 vmw_fp->locked_master = drm_master_get(file_priv->master); 1262 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 1263 vmw_kms_legacy_hotspot_clear(dev_priv); 1264 if (unlikely((ret != 0))) { 1265 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 1266 drm_master_put(&vmw_fp->locked_master); 1267 } 1268 1269 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); 1270 1271 if (!dev_priv->enable_fb) 1272 vmw_svga_disable(dev_priv); 1273 1274 dev_priv->active_master = &dev_priv->fbdev_master; 1275 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); 1276 ttm_vt_unlock(&dev_priv->fbdev_master.lock); 1277 1278 if (dev_priv->enable_fb) 1279 vmw_fb_on(dev_priv); 1280 } 1281 1282 /** 1283 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1284 * 1285 * @dev_priv: Pointer to device private struct. 1286 * Needs the reservation sem to be held in non-exclusive mode. 1287 */ 1288 static void __vmw_svga_enable(struct vmw_private *dev_priv) 1289 { 1290 spin_lock(&dev_priv->svga_lock); 1291 if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1292 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); 1293 dev_priv->bdev.man[TTM_PL_VRAM].use_type = true; 1294 } 1295 spin_unlock(&dev_priv->svga_lock); 1296 } 1297 1298 /** 1299 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1300 * 1301 * @dev_priv: Pointer to device private struct. 1302 */ 1303 void vmw_svga_enable(struct vmw_private *dev_priv) 1304 { 1305 (void) ttm_read_lock(&dev_priv->reservation_sem, false); 1306 __vmw_svga_enable(dev_priv); 1307 ttm_read_unlock(&dev_priv->reservation_sem); 1308 } 1309 1310 /** 1311 * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1312 * 1313 * @dev_priv: Pointer to device private struct. 1314 * Needs the reservation sem to be held in exclusive mode. 1315 * Will not empty VRAM. VRAM must be emptied by caller. 1316 */ 1317 static void __vmw_svga_disable(struct vmw_private *dev_priv) 1318 { 1319 spin_lock(&dev_priv->svga_lock); 1320 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1321 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 1322 vmw_write(dev_priv, SVGA_REG_ENABLE, 1323 SVGA_REG_ENABLE_HIDE | 1324 SVGA_REG_ENABLE_ENABLE); 1325 } 1326 spin_unlock(&dev_priv->svga_lock); 1327 } 1328 1329 /** 1330 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1331 * running. 1332 * 1333 * @dev_priv: Pointer to device private struct. 1334 * Will empty VRAM. 1335 */ 1336 void vmw_svga_disable(struct vmw_private *dev_priv) 1337 { 1338 ttm_write_lock(&dev_priv->reservation_sem, false); 1339 spin_lock(&dev_priv->svga_lock); 1340 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1341 dev_priv->bdev.man[TTM_PL_VRAM].use_type = false; 1342 spin_unlock(&dev_priv->svga_lock); 1343 if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) 1344 DRM_ERROR("Failed evicting VRAM buffers.\n"); 1345 vmw_write(dev_priv, SVGA_REG_ENABLE, 1346 SVGA_REG_ENABLE_HIDE | 1347 SVGA_REG_ENABLE_ENABLE); 1348 } else 1349 spin_unlock(&dev_priv->svga_lock); 1350 ttm_write_unlock(&dev_priv->reservation_sem); 1351 } 1352 1353 static void vmw_remove(struct pci_dev *pdev) 1354 { 1355 struct drm_device *dev = pci_get_drvdata(pdev); 1356 1357 pci_disable_device(pdev); 1358 drm_put_dev(dev); 1359 } 1360 1361 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1362 void *ptr) 1363 { 1364 struct vmw_private *dev_priv = 1365 container_of(nb, struct vmw_private, pm_nb); 1366 1367 switch (val) { 1368 case PM_HIBERNATION_PREPARE: 1369 if (dev_priv->enable_fb) 1370 vmw_fb_off(dev_priv); 1371 ttm_suspend_lock(&dev_priv->reservation_sem); 1372 1373 /* 1374 * This empties VRAM and unbinds all GMR bindings. 1375 * Buffer contents is moved to swappable memory. 1376 */ 1377 vmw_execbuf_release_pinned_bo(dev_priv); 1378 vmw_resource_evict_all(dev_priv); 1379 vmw_release_device_early(dev_priv); 1380 ttm_bo_swapout_all(&dev_priv->bdev); 1381 vmw_fence_fifo_down(dev_priv->fman); 1382 break; 1383 case PM_POST_HIBERNATION: 1384 case PM_POST_RESTORE: 1385 vmw_fence_fifo_up(dev_priv->fman); 1386 ttm_suspend_unlock(&dev_priv->reservation_sem); 1387 if (dev_priv->enable_fb) 1388 vmw_fb_on(dev_priv); 1389 break; 1390 case PM_RESTORE_PREPARE: 1391 break; 1392 default: 1393 break; 1394 } 1395 return 0; 1396 } 1397 1398 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1399 { 1400 struct drm_device *dev = pci_get_drvdata(pdev); 1401 struct vmw_private *dev_priv = vmw_priv(dev); 1402 1403 if (dev_priv->refuse_hibernation) 1404 return -EBUSY; 1405 1406 pci_save_state(pdev); 1407 pci_disable_device(pdev); 1408 pci_set_power_state(pdev, PCI_D3hot); 1409 return 0; 1410 } 1411 1412 static int vmw_pci_resume(struct pci_dev *pdev) 1413 { 1414 pci_set_power_state(pdev, PCI_D0); 1415 pci_restore_state(pdev); 1416 return pci_enable_device(pdev); 1417 } 1418 1419 static int vmw_pm_suspend(struct device *kdev) 1420 { 1421 struct pci_dev *pdev = to_pci_dev(kdev); 1422 struct pm_message dummy; 1423 1424 dummy.event = 0; 1425 1426 return vmw_pci_suspend(pdev, dummy); 1427 } 1428 1429 static int vmw_pm_resume(struct device *kdev) 1430 { 1431 struct pci_dev *pdev = to_pci_dev(kdev); 1432 1433 return vmw_pci_resume(pdev); 1434 } 1435 1436 static int vmw_pm_freeze(struct device *kdev) 1437 { 1438 struct pci_dev *pdev = to_pci_dev(kdev); 1439 struct drm_device *dev = pci_get_drvdata(pdev); 1440 struct vmw_private *dev_priv = vmw_priv(dev); 1441 1442 dev_priv->suspended = true; 1443 if (dev_priv->enable_fb) 1444 vmw_fifo_resource_dec(dev_priv); 1445 1446 if (atomic_read(&dev_priv->num_fifo_resources) != 0) { 1447 DRM_ERROR("Can't hibernate while 3D resources are active.\n"); 1448 if (dev_priv->enable_fb) 1449 vmw_fifo_resource_inc(dev_priv); 1450 WARN_ON(vmw_request_device_late(dev_priv)); 1451 dev_priv->suspended = false; 1452 return -EBUSY; 1453 } 1454 1455 if (dev_priv->enable_fb) 1456 __vmw_svga_disable(dev_priv); 1457 1458 vmw_release_device_late(dev_priv); 1459 1460 return 0; 1461 } 1462 1463 static int vmw_pm_restore(struct device *kdev) 1464 { 1465 struct pci_dev *pdev = to_pci_dev(kdev); 1466 struct drm_device *dev = pci_get_drvdata(pdev); 1467 struct vmw_private *dev_priv = vmw_priv(dev); 1468 int ret; 1469 1470 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1471 (void) vmw_read(dev_priv, SVGA_REG_ID); 1472 1473 if (dev_priv->enable_fb) 1474 vmw_fifo_resource_inc(dev_priv); 1475 1476 ret = vmw_request_device(dev_priv); 1477 if (ret) 1478 return ret; 1479 1480 if (dev_priv->enable_fb) 1481 __vmw_svga_enable(dev_priv); 1482 1483 dev_priv->suspended = false; 1484 1485 return 0; 1486 } 1487 1488 static const struct dev_pm_ops vmw_pm_ops = { 1489 .freeze = vmw_pm_freeze, 1490 .thaw = vmw_pm_restore, 1491 .restore = vmw_pm_restore, 1492 .suspend = vmw_pm_suspend, 1493 .resume = vmw_pm_resume, 1494 }; 1495 1496 static const struct file_operations vmwgfx_driver_fops = { 1497 .owner = THIS_MODULE, 1498 .open = drm_open, 1499 .release = drm_release, 1500 .unlocked_ioctl = vmw_unlocked_ioctl, 1501 .mmap = vmw_mmap, 1502 .poll = vmw_fops_poll, 1503 .read = vmw_fops_read, 1504 #if defined(CONFIG_COMPAT) 1505 .compat_ioctl = vmw_compat_ioctl, 1506 #endif 1507 .llseek = noop_llseek, 1508 }; 1509 1510 static struct drm_driver driver = { 1511 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 1512 DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER, 1513 .load = vmw_driver_load, 1514 .unload = vmw_driver_unload, 1515 .lastclose = vmw_lastclose, 1516 .irq_preinstall = vmw_irq_preinstall, 1517 .irq_postinstall = vmw_irq_postinstall, 1518 .irq_uninstall = vmw_irq_uninstall, 1519 .irq_handler = vmw_irq_handler, 1520 .get_vblank_counter = vmw_get_vblank_counter, 1521 .enable_vblank = vmw_enable_vblank, 1522 .disable_vblank = vmw_disable_vblank, 1523 .ioctls = vmw_ioctls, 1524 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1525 .master_create = vmw_master_create, 1526 .master_destroy = vmw_master_destroy, 1527 .master_set = vmw_master_set, 1528 .master_drop = vmw_master_drop, 1529 .open = vmw_driver_open, 1530 .postclose = vmw_postclose, 1531 .set_busid = drm_pci_set_busid, 1532 1533 .dumb_create = vmw_dumb_create, 1534 .dumb_map_offset = vmw_dumb_map_offset, 1535 .dumb_destroy = vmw_dumb_destroy, 1536 1537 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1538 .prime_handle_to_fd = vmw_prime_handle_to_fd, 1539 1540 .fops = &vmwgfx_driver_fops, 1541 .name = VMWGFX_DRIVER_NAME, 1542 .desc = VMWGFX_DRIVER_DESC, 1543 .date = VMWGFX_DRIVER_DATE, 1544 .major = VMWGFX_DRIVER_MAJOR, 1545 .minor = VMWGFX_DRIVER_MINOR, 1546 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL 1547 }; 1548 1549 static struct pci_driver vmw_pci_driver = { 1550 .name = VMWGFX_DRIVER_NAME, 1551 .id_table = vmw_pci_id_list, 1552 .probe = vmw_probe, 1553 .remove = vmw_remove, 1554 .driver = { 1555 .pm = &vmw_pm_ops 1556 } 1557 }; 1558 1559 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1560 { 1561 return drm_get_pci_dev(pdev, ent, &driver); 1562 } 1563 1564 static int __init vmwgfx_init(void) 1565 { 1566 int ret; 1567 1568 if (vgacon_text_force()) 1569 return -EINVAL; 1570 1571 ret = drm_pci_init(&driver, &vmw_pci_driver); 1572 if (ret) 1573 DRM_ERROR("Failed initializing DRM.\n"); 1574 return ret; 1575 } 1576 1577 static void __exit vmwgfx_exit(void) 1578 { 1579 drm_pci_exit(&driver, &vmw_pci_driver); 1580 } 1581 1582 module_init(vmwgfx_init); 1583 module_exit(vmwgfx_exit); 1584 1585 MODULE_AUTHOR("VMware Inc. and others"); 1586 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); 1587 MODULE_LICENSE("GPL and additional rights"); 1588 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." 1589 __stringify(VMWGFX_DRIVER_MINOR) "." 1590 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 1591 "0"); 1592