1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 #include <linux/console.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/module.h> 31 #include <linux/pci.h> 32 #include <linux/mem_encrypt.h> 33 34 #include <drm/drm_aperture.h> 35 #include <drm/drm_drv.h> 36 #include <drm/drm_ioctl.h> 37 #include <drm/drm_sysfs.h> 38 #include <drm/ttm/ttm_bo_driver.h> 39 #include <drm/ttm/ttm_range_manager.h> 40 #include <drm/ttm/ttm_placement.h> 41 #include <generated/utsrelease.h> 42 43 #include "ttm_object.h" 44 #include "vmwgfx_binding.h" 45 #include "vmwgfx_drv.h" 46 47 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 48 49 #define VMW_MIN_INITIAL_WIDTH 800 50 #define VMW_MIN_INITIAL_HEIGHT 600 51 52 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) 53 54 55 /* 56 * Fully encoded drm commands. Might move to vmw_drm.h 57 */ 58 59 #define DRM_IOCTL_VMW_GET_PARAM \ 60 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 61 struct drm_vmw_getparam_arg) 62 #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 63 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 64 union drm_vmw_alloc_dmabuf_arg) 65 #define DRM_IOCTL_VMW_UNREF_DMABUF \ 66 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 67 struct drm_vmw_unref_dmabuf_arg) 68 #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 69 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 70 struct drm_vmw_cursor_bypass_arg) 71 72 #define DRM_IOCTL_VMW_CONTROL_STREAM \ 73 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 74 struct drm_vmw_control_stream_arg) 75 #define DRM_IOCTL_VMW_CLAIM_STREAM \ 76 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 77 struct drm_vmw_stream_arg) 78 #define DRM_IOCTL_VMW_UNREF_STREAM \ 79 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 80 struct drm_vmw_stream_arg) 81 82 #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 83 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 84 struct drm_vmw_context_arg) 85 #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 86 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 87 struct drm_vmw_context_arg) 88 #define DRM_IOCTL_VMW_CREATE_SURFACE \ 89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 90 union drm_vmw_surface_create_arg) 91 #define DRM_IOCTL_VMW_UNREF_SURFACE \ 92 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 93 struct drm_vmw_surface_arg) 94 #define DRM_IOCTL_VMW_REF_SURFACE \ 95 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 96 union drm_vmw_surface_reference_arg) 97 #define DRM_IOCTL_VMW_EXECBUF \ 98 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 99 struct drm_vmw_execbuf_arg) 100 #define DRM_IOCTL_VMW_GET_3D_CAP \ 101 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 102 struct drm_vmw_get_3d_cap_arg) 103 #define DRM_IOCTL_VMW_FENCE_WAIT \ 104 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 105 struct drm_vmw_fence_wait_arg) 106 #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 107 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 108 struct drm_vmw_fence_signaled_arg) 109 #define DRM_IOCTL_VMW_FENCE_UNREF \ 110 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 111 struct drm_vmw_fence_arg) 112 #define DRM_IOCTL_VMW_FENCE_EVENT \ 113 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 114 struct drm_vmw_fence_event_arg) 115 #define DRM_IOCTL_VMW_PRESENT \ 116 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 117 struct drm_vmw_present_arg) 118 #define DRM_IOCTL_VMW_PRESENT_READBACK \ 119 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 120 struct drm_vmw_present_readback_arg) 121 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 122 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 123 struct drm_vmw_update_layout_arg) 124 #define DRM_IOCTL_VMW_CREATE_SHADER \ 125 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 126 struct drm_vmw_shader_create_arg) 127 #define DRM_IOCTL_VMW_UNREF_SHADER \ 128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 129 struct drm_vmw_shader_arg) 130 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 131 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 132 union drm_vmw_gb_surface_create_arg) 133 #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 134 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 135 union drm_vmw_gb_surface_reference_arg) 136 #define DRM_IOCTL_VMW_SYNCCPU \ 137 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 138 struct drm_vmw_synccpu_arg) 139 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 140 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 141 struct drm_vmw_context_arg) 142 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ 143 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ 144 union drm_vmw_gb_surface_create_ext_arg) 145 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ 146 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ 147 union drm_vmw_gb_surface_reference_ext_arg) 148 #define DRM_IOCTL_VMW_MSG \ 149 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ 150 struct drm_vmw_msg_arg) 151 152 /* 153 * The core DRM version of this macro doesn't account for 154 * DRM_COMMAND_BASE. 155 */ 156 157 #define VMW_IOCTL_DEF(ioctl, func, flags) \ 158 [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} 159 160 /* 161 * Ioctl definitions. 162 */ 163 164 static const struct drm_ioctl_desc vmw_ioctls[] = { 165 VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, 166 DRM_RENDER_ALLOW), 167 VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, 168 DRM_RENDER_ALLOW), 169 VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, 170 DRM_RENDER_ALLOW), 171 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, 172 vmw_kms_cursor_bypass_ioctl, 173 DRM_MASTER), 174 175 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 176 DRM_MASTER), 177 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 178 DRM_MASTER), 179 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 180 DRM_MASTER), 181 182 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 183 DRM_RENDER_ALLOW), 184 VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 185 DRM_RENDER_ALLOW), 186 VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 187 DRM_RENDER_ALLOW), 188 VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 189 DRM_RENDER_ALLOW), 190 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 191 DRM_RENDER_ALLOW), 192 VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, 193 DRM_RENDER_ALLOW), 194 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 195 DRM_RENDER_ALLOW), 196 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, 197 vmw_fence_obj_signaled_ioctl, 198 DRM_RENDER_ALLOW), 199 VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 200 DRM_RENDER_ALLOW), 201 VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 202 DRM_RENDER_ALLOW), 203 VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 204 DRM_RENDER_ALLOW), 205 206 /* these allow direct access to the framebuffers mark as master only */ 207 VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl, 208 DRM_MASTER | DRM_AUTH), 209 VMW_IOCTL_DEF(VMW_PRESENT_READBACK, 210 vmw_present_readback_ioctl, 211 DRM_MASTER | DRM_AUTH), 212 /* 213 * The permissions of the below ioctl are overridden in 214 * vmw_generic_ioctl(). We require either 215 * DRM_MASTER or capable(CAP_SYS_ADMIN). 216 */ 217 VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, 218 vmw_kms_update_layout_ioctl, 219 DRM_RENDER_ALLOW), 220 VMW_IOCTL_DEF(VMW_CREATE_SHADER, 221 vmw_shader_define_ioctl, 222 DRM_RENDER_ALLOW), 223 VMW_IOCTL_DEF(VMW_UNREF_SHADER, 224 vmw_shader_destroy_ioctl, 225 DRM_RENDER_ALLOW), 226 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE, 227 vmw_gb_surface_define_ioctl, 228 DRM_RENDER_ALLOW), 229 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF, 230 vmw_gb_surface_reference_ioctl, 231 DRM_RENDER_ALLOW), 232 VMW_IOCTL_DEF(VMW_SYNCCPU, 233 vmw_user_bo_synccpu_ioctl, 234 DRM_RENDER_ALLOW), 235 VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, 236 vmw_extended_context_define_ioctl, 237 DRM_RENDER_ALLOW), 238 VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE_EXT, 239 vmw_gb_surface_define_ext_ioctl, 240 DRM_RENDER_ALLOW), 241 VMW_IOCTL_DEF(VMW_GB_SURFACE_REF_EXT, 242 vmw_gb_surface_reference_ext_ioctl, 243 DRM_RENDER_ALLOW), 244 VMW_IOCTL_DEF(VMW_MSG, 245 vmw_msg_ioctl, 246 DRM_RENDER_ALLOW), 247 }; 248 249 static const struct pci_device_id vmw_pci_id_list[] = { 250 { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, 251 { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) }, 252 { } 253 }; 254 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 255 256 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 257 static int vmw_force_iommu; 258 static int vmw_restrict_iommu; 259 static int vmw_force_coherent; 260 static int vmw_restrict_dma_mask; 261 static int vmw_assume_16bpp; 262 263 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 264 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 265 void *ptr); 266 267 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 268 module_param_named(enable_fbdev, enable_fbdev, int, 0600); 269 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages"); 270 module_param_named(force_dma_api, vmw_force_iommu, int, 0600); 271 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 272 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 273 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 274 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 275 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 276 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 277 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 278 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 279 280 281 static void vmw_print_capabilities2(uint32_t capabilities2) 282 { 283 DRM_INFO("Capabilities2:\n"); 284 if (capabilities2 & SVGA_CAP2_GROW_OTABLE) 285 DRM_INFO(" Grow oTable.\n"); 286 if (capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY) 287 DRM_INFO(" IntraSurface copy.\n"); 288 if (capabilities2 & SVGA_CAP2_DX3) 289 DRM_INFO(" DX3.\n"); 290 } 291 292 static void vmw_print_capabilities(uint32_t capabilities) 293 { 294 DRM_INFO("Capabilities:\n"); 295 if (capabilities & SVGA_CAP_RECT_COPY) 296 DRM_INFO(" Rect copy.\n"); 297 if (capabilities & SVGA_CAP_CURSOR) 298 DRM_INFO(" Cursor.\n"); 299 if (capabilities & SVGA_CAP_CURSOR_BYPASS) 300 DRM_INFO(" Cursor bypass.\n"); 301 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2) 302 DRM_INFO(" Cursor bypass 2.\n"); 303 if (capabilities & SVGA_CAP_8BIT_EMULATION) 304 DRM_INFO(" 8bit emulation.\n"); 305 if (capabilities & SVGA_CAP_ALPHA_CURSOR) 306 DRM_INFO(" Alpha cursor.\n"); 307 if (capabilities & SVGA_CAP_3D) 308 DRM_INFO(" 3D.\n"); 309 if (capabilities & SVGA_CAP_EXTENDED_FIFO) 310 DRM_INFO(" Extended Fifo.\n"); 311 if (capabilities & SVGA_CAP_MULTIMON) 312 DRM_INFO(" Multimon.\n"); 313 if (capabilities & SVGA_CAP_PITCHLOCK) 314 DRM_INFO(" Pitchlock.\n"); 315 if (capabilities & SVGA_CAP_IRQMASK) 316 DRM_INFO(" Irq mask.\n"); 317 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) 318 DRM_INFO(" Display Topology.\n"); 319 if (capabilities & SVGA_CAP_GMR) 320 DRM_INFO(" GMR.\n"); 321 if (capabilities & SVGA_CAP_TRACES) 322 DRM_INFO(" Traces.\n"); 323 if (capabilities & SVGA_CAP_GMR2) 324 DRM_INFO(" GMR2.\n"); 325 if (capabilities & SVGA_CAP_SCREEN_OBJECT_2) 326 DRM_INFO(" Screen Object 2.\n"); 327 if (capabilities & SVGA_CAP_COMMAND_BUFFERS) 328 DRM_INFO(" Command Buffers.\n"); 329 if (capabilities & SVGA_CAP_CMD_BUFFERS_2) 330 DRM_INFO(" Command Buffers 2.\n"); 331 if (capabilities & SVGA_CAP_GBOBJECTS) 332 DRM_INFO(" Guest Backed Resources.\n"); 333 if (capabilities & SVGA_CAP_DX) 334 DRM_INFO(" DX Features.\n"); 335 if (capabilities & SVGA_CAP_HP_CMD_QUEUE) 336 DRM_INFO(" HP Command Queue.\n"); 337 } 338 339 /** 340 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result 341 * 342 * @dev_priv: A device private structure. 343 * 344 * This function creates a small buffer object that holds the query 345 * result for dummy queries emitted as query barriers. 346 * The function will then map the first page and initialize a pending 347 * occlusion query result structure, Finally it will unmap the buffer. 348 * No interruptible waits are done within this function. 349 * 350 * Returns an error if bo creation or initialization fails. 351 */ 352 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 353 { 354 int ret; 355 struct vmw_buffer_object *vbo; 356 struct ttm_bo_kmap_obj map; 357 volatile SVGA3dQueryResult *result; 358 bool dummy; 359 360 /* 361 * Create the vbo as pinned, so that a tryreserve will 362 * immediately succeed. This is because we're the only 363 * user of the bo currently. 364 */ 365 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); 366 if (!vbo) 367 return -ENOMEM; 368 369 ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, 370 &vmw_sys_placement, false, true, 371 &vmw_bo_bo_free); 372 if (unlikely(ret != 0)) 373 return ret; 374 375 ret = ttm_bo_reserve(&vbo->base, false, true, NULL); 376 BUG_ON(ret != 0); 377 vmw_bo_pin_reserved(vbo, true); 378 379 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); 380 if (likely(ret == 0)) { 381 result = ttm_kmap_obj_virtual(&map, &dummy); 382 result->totalSize = sizeof(*result); 383 result->state = SVGA3D_QUERYSTATE_PENDING; 384 result->result32 = 0xff; 385 ttm_bo_kunmap(&map); 386 } 387 vmw_bo_pin_reserved(vbo, false); 388 ttm_bo_unreserve(&vbo->base); 389 390 if (unlikely(ret != 0)) { 391 DRM_ERROR("Dummy query buffer map failed.\n"); 392 vmw_bo_unreference(&vbo); 393 } else 394 dev_priv->dummy_query_bo = vbo; 395 396 return ret; 397 } 398 399 static int vmw_device_init(struct vmw_private *dev_priv) 400 { 401 bool uses_fb_traces = false; 402 403 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); 404 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 405 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 406 407 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 408 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 409 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 410 411 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | 412 SVGA_REG_ENABLE_HIDE); 413 414 uses_fb_traces = !vmw_cmd_supported(dev_priv) && 415 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0; 416 417 vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces); 418 dev_priv->fifo = vmw_fifo_create(dev_priv); 419 if (IS_ERR(dev_priv->fifo)) { 420 int err = PTR_ERR(dev_priv->fifo); 421 dev_priv->fifo = NULL; 422 return err; 423 } else if (!dev_priv->fifo) { 424 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 425 } 426 427 dev_priv->last_read_seqno = vmw_fence_read(dev_priv); 428 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 429 return 0; 430 } 431 432 static void vmw_device_fini(struct vmw_private *vmw) 433 { 434 /* 435 * Legacy sync 436 */ 437 vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 438 while (vmw_read(vmw, SVGA_REG_BUSY) != 0) 439 ; 440 441 vmw->last_read_seqno = vmw_fence_read(vmw); 442 443 vmw_write(vmw, SVGA_REG_CONFIG_DONE, 444 vmw->config_done_state); 445 vmw_write(vmw, SVGA_REG_ENABLE, 446 vmw->enable_state); 447 vmw_write(vmw, SVGA_REG_TRACES, 448 vmw->traces_state); 449 450 vmw_fifo_destroy(vmw); 451 } 452 453 /** 454 * vmw_request_device_late - Perform late device setup 455 * 456 * @dev_priv: Pointer to device private. 457 * 458 * This function performs setup of otables and enables large command 459 * buffer submission. These tasks are split out to a separate function 460 * because it reverts vmw_release_device_early and is intended to be used 461 * by an error path in the hibernation code. 462 */ 463 static int vmw_request_device_late(struct vmw_private *dev_priv) 464 { 465 int ret; 466 467 if (dev_priv->has_mob) { 468 ret = vmw_otables_setup(dev_priv); 469 if (unlikely(ret != 0)) { 470 DRM_ERROR("Unable to initialize " 471 "guest Memory OBjects.\n"); 472 return ret; 473 } 474 } 475 476 if (dev_priv->cman) { 477 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096); 478 if (ret) { 479 struct vmw_cmdbuf_man *man = dev_priv->cman; 480 481 dev_priv->cman = NULL; 482 vmw_cmdbuf_man_destroy(man); 483 } 484 } 485 486 return 0; 487 } 488 489 static int vmw_request_device(struct vmw_private *dev_priv) 490 { 491 int ret; 492 493 ret = vmw_device_init(dev_priv); 494 if (unlikely(ret != 0)) { 495 DRM_ERROR("Unable to initialize the device.\n"); 496 return ret; 497 } 498 vmw_fence_fifo_up(dev_priv->fman); 499 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 500 if (IS_ERR(dev_priv->cman)) { 501 dev_priv->cman = NULL; 502 dev_priv->sm_type = VMW_SM_LEGACY; 503 } 504 505 ret = vmw_request_device_late(dev_priv); 506 if (ret) 507 goto out_no_mob; 508 509 ret = vmw_dummy_query_bo_create(dev_priv); 510 if (unlikely(ret != 0)) 511 goto out_no_query_bo; 512 513 return 0; 514 515 out_no_query_bo: 516 if (dev_priv->cman) 517 vmw_cmdbuf_remove_pool(dev_priv->cman); 518 if (dev_priv->has_mob) { 519 struct ttm_resource_manager *man; 520 521 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 522 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 523 vmw_otables_takedown(dev_priv); 524 } 525 if (dev_priv->cman) 526 vmw_cmdbuf_man_destroy(dev_priv->cman); 527 out_no_mob: 528 vmw_fence_fifo_down(dev_priv->fman); 529 vmw_device_fini(dev_priv); 530 return ret; 531 } 532 533 /** 534 * vmw_release_device_early - Early part of fifo takedown. 535 * 536 * @dev_priv: Pointer to device private struct. 537 * 538 * This is the first part of command submission takedown, to be called before 539 * buffer management is taken down. 540 */ 541 static void vmw_release_device_early(struct vmw_private *dev_priv) 542 { 543 /* 544 * Previous destructions should've released 545 * the pinned bo. 546 */ 547 548 BUG_ON(dev_priv->pinned_bo != NULL); 549 550 vmw_bo_unreference(&dev_priv->dummy_query_bo); 551 if (dev_priv->cman) 552 vmw_cmdbuf_remove_pool(dev_priv->cman); 553 554 if (dev_priv->has_mob) { 555 struct ttm_resource_manager *man; 556 557 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 558 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 559 vmw_otables_takedown(dev_priv); 560 } 561 } 562 563 /** 564 * vmw_release_device_late - Late part of fifo takedown. 565 * 566 * @dev_priv: Pointer to device private struct. 567 * 568 * This is the last part of the command submission takedown, to be called when 569 * command submission is no longer needed. It may wait on pending fences. 570 */ 571 static void vmw_release_device_late(struct vmw_private *dev_priv) 572 { 573 vmw_fence_fifo_down(dev_priv->fman); 574 if (dev_priv->cman) 575 vmw_cmdbuf_man_destroy(dev_priv->cman); 576 577 vmw_device_fini(dev_priv); 578 } 579 580 /* 581 * Sets the initial_[width|height] fields on the given vmw_private. 582 * 583 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then 584 * clamping the value to fb_max_[width|height] fields and the 585 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 586 * If the values appear to be invalid, set them to 587 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 588 */ 589 static void vmw_get_initial_size(struct vmw_private *dev_priv) 590 { 591 uint32_t width; 592 uint32_t height; 593 594 width = vmw_read(dev_priv, SVGA_REG_WIDTH); 595 height = vmw_read(dev_priv, SVGA_REG_HEIGHT); 596 597 width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); 598 height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); 599 600 if (width > dev_priv->fb_max_width || 601 height > dev_priv->fb_max_height) { 602 603 /* 604 * This is a host error and shouldn't occur. 605 */ 606 607 width = VMW_MIN_INITIAL_WIDTH; 608 height = VMW_MIN_INITIAL_HEIGHT; 609 } 610 611 dev_priv->initial_width = width; 612 dev_priv->initial_height = height; 613 } 614 615 /** 616 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 617 * system. 618 * 619 * @dev_priv: Pointer to a struct vmw_private 620 * 621 * This functions tries to determine what actions need to be taken by the 622 * driver to make system pages visible to the device. 623 * If this function decides that DMA is not possible, it returns -EINVAL. 624 * The driver may then try to disable features of the device that require 625 * DMA. 626 */ 627 static int vmw_dma_select_mode(struct vmw_private *dev_priv) 628 { 629 static const char *names[vmw_dma_map_max] = { 630 [vmw_dma_phys] = "Using physical TTM page addresses.", 631 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 632 [vmw_dma_map_populate] = "Caching DMA mappings.", 633 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 634 635 /* TTM currently doesn't fully support SEV encryption. */ 636 if (mem_encrypt_active()) 637 return -EINVAL; 638 639 if (vmw_force_coherent) 640 dev_priv->map_mode = vmw_dma_alloc_coherent; 641 else if (vmw_restrict_iommu) 642 dev_priv->map_mode = vmw_dma_map_bind; 643 else 644 dev_priv->map_mode = vmw_dma_map_populate; 645 646 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 647 return 0; 648 } 649 650 /** 651 * vmw_dma_masks - set required page- and dma masks 652 * 653 * @dev_priv: Pointer to struct drm-device 654 * 655 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 656 * restriction also for 64-bit systems. 657 */ 658 static int vmw_dma_masks(struct vmw_private *dev_priv) 659 { 660 struct drm_device *dev = &dev_priv->drm; 661 int ret = 0; 662 663 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 664 if (dev_priv->map_mode != vmw_dma_phys && 665 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 666 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 667 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 668 } 669 670 return ret; 671 } 672 673 static int vmw_vram_manager_init(struct vmw_private *dev_priv) 674 { 675 int ret; 676 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 677 ret = vmw_thp_init(dev_priv); 678 #else 679 ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false, 680 dev_priv->vram_size >> PAGE_SHIFT); 681 #endif 682 ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); 683 return ret; 684 } 685 686 static void vmw_vram_manager_fini(struct vmw_private *dev_priv) 687 { 688 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 689 vmw_thp_fini(dev_priv); 690 #else 691 ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM); 692 #endif 693 } 694 695 static int vmw_setup_pci_resources(struct vmw_private *dev, 696 unsigned long pci_id) 697 { 698 resource_size_t rmmio_start; 699 resource_size_t rmmio_size; 700 resource_size_t fifo_start; 701 resource_size_t fifo_size; 702 int ret; 703 struct pci_dev *pdev = to_pci_dev(dev->drm.dev); 704 705 pci_set_master(pdev); 706 707 ret = pci_request_regions(pdev, "vmwgfx probe"); 708 if (ret) 709 return ret; 710 711 dev->pci_id = pci_id; 712 if (pci_id == VMWGFX_PCI_ID_SVGA3) { 713 rmmio_start = pci_resource_start(pdev, 0); 714 rmmio_size = pci_resource_len(pdev, 0); 715 dev->vram_start = pci_resource_start(pdev, 2); 716 dev->vram_size = pci_resource_len(pdev, 2); 717 718 DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n", 719 &rmmio_start, (uint64_t)rmmio_size / 1024); 720 dev->rmmio = devm_ioremap(dev->drm.dev, 721 rmmio_start, 722 rmmio_size); 723 if (!dev->rmmio) { 724 DRM_ERROR("Failed mapping registers mmio memory.\n"); 725 pci_release_regions(pdev); 726 return -ENOMEM; 727 } 728 } else if (pci_id == VMWGFX_PCI_ID_SVGA2) { 729 dev->io_start = pci_resource_start(pdev, 0); 730 dev->vram_start = pci_resource_start(pdev, 1); 731 dev->vram_size = pci_resource_len(pdev, 1); 732 fifo_start = pci_resource_start(pdev, 2); 733 fifo_size = pci_resource_len(pdev, 2); 734 735 DRM_INFO("FIFO at %pa size is %llu kiB\n", 736 &fifo_start, (uint64_t)fifo_size / 1024); 737 dev->fifo_mem = devm_memremap(dev->drm.dev, 738 fifo_start, 739 fifo_size, 740 MEMREMAP_WB); 741 742 if (IS_ERR(dev->fifo_mem)) { 743 DRM_ERROR("Failed mapping FIFO memory.\n"); 744 pci_release_regions(pdev); 745 return PTR_ERR(dev->fifo_mem); 746 } 747 } else { 748 pci_release_regions(pdev); 749 return -EINVAL; 750 } 751 752 /* 753 * This is approximate size of the vram, the exact size will only 754 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource 755 * size will be equal to or bigger than the size reported by 756 * SVGA_REG_VRAM_SIZE. 757 */ 758 DRM_INFO("VRAM at %pa size is %llu kiB\n", 759 &dev->vram_start, (uint64_t)dev->vram_size / 1024); 760 761 return 0; 762 } 763 764 static int vmw_detect_version(struct vmw_private *dev) 765 { 766 uint32_t svga_id; 767 768 vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ? 769 SVGA_ID_3 : SVGA_ID_2); 770 svga_id = vmw_read(dev, SVGA_REG_ID); 771 if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) { 772 DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n", 773 svga_id, dev->vmw_chipset); 774 return -ENOSYS; 775 } 776 BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3)); 777 DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff)); 778 return 0; 779 } 780 781 static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) 782 { 783 int ret; 784 enum vmw_res_type i; 785 bool refuse_dma = false; 786 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 787 788 dev_priv->vmw_chipset = pci_id; 789 dev_priv->drm.dev_private = dev_priv; 790 791 mutex_init(&dev_priv->cmdbuf_mutex); 792 mutex_init(&dev_priv->binding_mutex); 793 spin_lock_init(&dev_priv->resource_lock); 794 spin_lock_init(&dev_priv->hw_lock); 795 spin_lock_init(&dev_priv->waiter_lock); 796 spin_lock_init(&dev_priv->cap_lock); 797 spin_lock_init(&dev_priv->cursor_lock); 798 799 ret = vmw_setup_pci_resources(dev_priv, pci_id); 800 if (ret) 801 return ret; 802 ret = vmw_detect_version(dev_priv); 803 if (ret) 804 goto out_no_pci_or_version; 805 806 807 for (i = vmw_res_context; i < vmw_res_max; ++i) { 808 idr_init_base(&dev_priv->res_idr[i], 1); 809 INIT_LIST_HEAD(&dev_priv->res_lru[i]); 810 } 811 812 init_waitqueue_head(&dev_priv->fence_queue); 813 init_waitqueue_head(&dev_priv->fifo_queue); 814 dev_priv->fence_queue_waiters = 0; 815 dev_priv->fifo_queue_waiters = 0; 816 817 dev_priv->used_memory_size = 0; 818 819 dev_priv->assume_16bpp = !!vmw_assume_16bpp; 820 821 dev_priv->enable_fb = enable_fbdev; 822 823 824 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 825 826 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) { 827 dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); 828 } 829 830 831 ret = vmw_dma_select_mode(dev_priv); 832 if (unlikely(ret != 0)) { 833 DRM_INFO("Restricting capabilities since DMA not available.\n"); 834 refuse_dma = true; 835 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) 836 DRM_INFO("Disabling 3D acceleration.\n"); 837 } 838 839 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 840 dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 841 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 842 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 843 844 vmw_get_initial_size(dev_priv); 845 846 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 847 dev_priv->max_gmr_ids = 848 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 849 dev_priv->max_gmr_pages = 850 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 851 dev_priv->memory_size = 852 vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 853 dev_priv->memory_size -= dev_priv->vram_size; 854 } else { 855 /* 856 * An arbitrary limit of 512MiB on surface 857 * memory. But all HWV8 hardware supports GMR2. 858 */ 859 dev_priv->memory_size = 512*1024*1024; 860 } 861 dev_priv->max_mob_pages = 0; 862 dev_priv->max_mob_size = 0; 863 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 864 uint64_t mem_size; 865 866 if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2) 867 mem_size = vmw_read(dev_priv, 868 SVGA_REG_GBOBJECT_MEM_SIZE_KB); 869 else 870 mem_size = 871 vmw_read(dev_priv, 872 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 873 874 /* 875 * Workaround for low memory 2D VMs to compensate for the 876 * allocation taken by fbdev 877 */ 878 if (!(dev_priv->capabilities & SVGA_CAP_3D)) 879 mem_size *= 3; 880 881 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 882 dev_priv->prim_bb_mem = 883 vmw_read(dev_priv, 884 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM); 885 dev_priv->max_mob_size = 886 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 887 dev_priv->stdu_max_width = 888 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 889 dev_priv->stdu_max_height = 890 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 891 892 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 893 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 894 dev_priv->texture_max_width = vmw_read(dev_priv, 895 SVGA_REG_DEV_CAP); 896 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 897 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 898 dev_priv->texture_max_height = vmw_read(dev_priv, 899 SVGA_REG_DEV_CAP); 900 } else { 901 dev_priv->texture_max_width = 8192; 902 dev_priv->texture_max_height = 8192; 903 dev_priv->prim_bb_mem = dev_priv->vram_size; 904 } 905 906 vmw_print_capabilities(dev_priv->capabilities); 907 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) 908 vmw_print_capabilities2(dev_priv->capabilities2); 909 DRM_INFO("Supports command queues = %d\n", 910 vmw_cmd_supported((dev_priv))); 911 912 ret = vmw_dma_masks(dev_priv); 913 if (unlikely(ret != 0)) 914 goto out_err0; 915 916 dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX); 917 918 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 919 DRM_INFO("Max GMR ids is %u\n", 920 (unsigned)dev_priv->max_gmr_ids); 921 DRM_INFO("Max number of GMR pages is %u\n", 922 (unsigned)dev_priv->max_gmr_pages); 923 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", 924 (unsigned)dev_priv->memory_size / 1024); 925 } 926 DRM_INFO("Maximum display memory size is %llu kiB\n", 927 (uint64_t)dev_priv->prim_bb_mem / 1024); 928 929 /* Need mmio memory to check for fifo pitchlock cap. */ 930 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 931 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 932 !vmw_fifo_have_pitchlock(dev_priv)) { 933 ret = -ENOSYS; 934 DRM_ERROR("Hardware has no pitchlock\n"); 935 goto out_err0; 936 } 937 938 dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, 939 &vmw_prime_dmabuf_ops); 940 941 if (unlikely(dev_priv->tdev == NULL)) { 942 DRM_ERROR("Unable to initialize TTM object management.\n"); 943 ret = -ENOMEM; 944 goto out_err0; 945 } 946 947 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 948 ret = vmw_irq_install(&dev_priv->drm, pdev->irq); 949 if (ret != 0) { 950 DRM_ERROR("Failed installing irq: %d\n", ret); 951 goto out_no_irq; 952 } 953 } 954 955 dev_priv->fman = vmw_fence_manager_init(dev_priv); 956 if (unlikely(dev_priv->fman == NULL)) { 957 ret = -ENOMEM; 958 goto out_no_fman; 959 } 960 961 drm_vma_offset_manager_init(&dev_priv->vma_manager, 962 DRM_FILE_PAGE_OFFSET_START, 963 DRM_FILE_PAGE_OFFSET_SIZE); 964 ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, 965 dev_priv->drm.dev, 966 dev_priv->drm.anon_inode->i_mapping, 967 &dev_priv->vma_manager, 968 dev_priv->map_mode == vmw_dma_alloc_coherent, 969 false); 970 if (unlikely(ret != 0)) { 971 DRM_ERROR("Failed initializing TTM buffer object driver.\n"); 972 goto out_no_bdev; 973 } 974 975 /* 976 * Enable VRAM, but initially don't use it until SVGA is enabled and 977 * unhidden. 978 */ 979 980 ret = vmw_vram_manager_init(dev_priv); 981 if (unlikely(ret != 0)) { 982 DRM_ERROR("Failed initializing memory manager for VRAM.\n"); 983 goto out_no_vram; 984 } 985 986 /* 987 * "Guest Memory Regions" is an aperture like feature with 988 * one slot per bo. There is an upper limit of the number of 989 * slots as well as the bo size. 990 */ 991 dev_priv->has_gmr = true; 992 /* TODO: This is most likely not correct */ 993 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 994 refuse_dma || 995 vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) { 996 DRM_INFO("No GMR memory available. " 997 "Graphics memory resources are very limited.\n"); 998 dev_priv->has_gmr = false; 999 } 1000 1001 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) { 1002 dev_priv->has_mob = true; 1003 1004 if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) { 1005 DRM_INFO("No MOB memory available. " 1006 "3D will be disabled.\n"); 1007 dev_priv->has_mob = false; 1008 } 1009 } 1010 1011 if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { 1012 spin_lock(&dev_priv->cap_lock); 1013 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DXCONTEXT); 1014 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) 1015 dev_priv->sm_type = VMW_SM_4; 1016 spin_unlock(&dev_priv->cap_lock); 1017 } 1018 1019 vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); 1020 1021 /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ 1022 if (has_sm4_context(dev_priv) && 1023 (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { 1024 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM41); 1025 1026 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) 1027 dev_priv->sm_type = VMW_SM_4_1; 1028 1029 if (has_sm4_1_context(dev_priv) && 1030 (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { 1031 vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_SM5); 1032 if (vmw_read(dev_priv, SVGA_REG_DEV_CAP)) 1033 dev_priv->sm_type = VMW_SM_5; 1034 } 1035 } 1036 1037 ret = vmw_kms_init(dev_priv); 1038 if (unlikely(ret != 0)) 1039 goto out_no_kms; 1040 vmw_overlay_init(dev_priv); 1041 1042 ret = vmw_request_device(dev_priv); 1043 if (ret) 1044 goto out_no_fifo; 1045 1046 if (dev_priv->sm_type == VMW_SM_5) 1047 DRM_INFO("SM5 support available.\n"); 1048 if (dev_priv->sm_type == VMW_SM_4_1) 1049 DRM_INFO("SM4_1 support available.\n"); 1050 if (dev_priv->sm_type == VMW_SM_4) 1051 DRM_INFO("SM4 support available.\n"); 1052 DRM_INFO("Running without reservation semaphore\n"); 1053 1054 vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)", 1055 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 1056 VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE); 1057 1058 if (dev_priv->enable_fb) { 1059 vmw_fifo_resource_inc(dev_priv); 1060 vmw_svga_enable(dev_priv); 1061 vmw_fb_init(dev_priv); 1062 } 1063 1064 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 1065 register_pm_notifier(&dev_priv->pm_nb); 1066 1067 return 0; 1068 1069 out_no_fifo: 1070 vmw_overlay_close(dev_priv); 1071 vmw_kms_close(dev_priv); 1072 out_no_kms: 1073 if (dev_priv->has_mob) 1074 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1075 if (dev_priv->has_gmr) 1076 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1077 vmw_vram_manager_fini(dev_priv); 1078 out_no_vram: 1079 ttm_device_fini(&dev_priv->bdev); 1080 out_no_bdev: 1081 vmw_fence_manager_takedown(dev_priv->fman); 1082 out_no_fman: 1083 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1084 vmw_irq_uninstall(&dev_priv->drm); 1085 out_no_irq: 1086 ttm_object_device_release(&dev_priv->tdev); 1087 out_err0: 1088 for (i = vmw_res_context; i < vmw_res_max; ++i) 1089 idr_destroy(&dev_priv->res_idr[i]); 1090 1091 if (dev_priv->ctx.staged_bindings) 1092 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1093 out_no_pci_or_version: 1094 pci_release_regions(pdev); 1095 return ret; 1096 } 1097 1098 static void vmw_driver_unload(struct drm_device *dev) 1099 { 1100 struct vmw_private *dev_priv = vmw_priv(dev); 1101 struct pci_dev *pdev = to_pci_dev(dev->dev); 1102 enum vmw_res_type i; 1103 1104 unregister_pm_notifier(&dev_priv->pm_nb); 1105 1106 if (dev_priv->ctx.res_ht_initialized) 1107 drm_ht_remove(&dev_priv->ctx.res_ht); 1108 vfree(dev_priv->ctx.cmd_bounce); 1109 if (dev_priv->enable_fb) { 1110 vmw_fb_off(dev_priv); 1111 vmw_fb_close(dev_priv); 1112 vmw_fifo_resource_dec(dev_priv); 1113 vmw_svga_disable(dev_priv); 1114 } 1115 1116 vmw_kms_close(dev_priv); 1117 vmw_overlay_close(dev_priv); 1118 1119 if (dev_priv->has_gmr) 1120 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1121 1122 vmw_release_device_early(dev_priv); 1123 if (dev_priv->has_mob) 1124 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1125 vmw_vram_manager_fini(dev_priv); 1126 ttm_device_fini(&dev_priv->bdev); 1127 drm_vma_offset_manager_destroy(&dev_priv->vma_manager); 1128 vmw_release_device_late(dev_priv); 1129 vmw_fence_manager_takedown(dev_priv->fman); 1130 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1131 vmw_irq_uninstall(&dev_priv->drm); 1132 1133 ttm_object_device_release(&dev_priv->tdev); 1134 if (dev_priv->ctx.staged_bindings) 1135 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1136 1137 for (i = vmw_res_context; i < vmw_res_max; ++i) 1138 idr_destroy(&dev_priv->res_idr[i]); 1139 1140 pci_release_regions(pdev); 1141 } 1142 1143 static void vmw_postclose(struct drm_device *dev, 1144 struct drm_file *file_priv) 1145 { 1146 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1147 1148 ttm_object_file_release(&vmw_fp->tfile); 1149 kfree(vmw_fp); 1150 } 1151 1152 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1153 { 1154 struct vmw_private *dev_priv = vmw_priv(dev); 1155 struct vmw_fpriv *vmw_fp; 1156 int ret = -ENOMEM; 1157 1158 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1159 if (unlikely(!vmw_fp)) 1160 return ret; 1161 1162 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1163 if (unlikely(vmw_fp->tfile == NULL)) 1164 goto out_no_tfile; 1165 1166 file_priv->driver_priv = vmw_fp; 1167 1168 return 0; 1169 1170 out_no_tfile: 1171 kfree(vmw_fp); 1172 return ret; 1173 } 1174 1175 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1176 unsigned long arg, 1177 long (*ioctl_func)(struct file *, unsigned int, 1178 unsigned long)) 1179 { 1180 struct drm_file *file_priv = filp->private_data; 1181 struct drm_device *dev = file_priv->minor->dev; 1182 unsigned int nr = DRM_IOCTL_NR(cmd); 1183 unsigned int flags; 1184 1185 /* 1186 * Do extra checking on driver private ioctls. 1187 */ 1188 1189 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1190 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1191 const struct drm_ioctl_desc *ioctl = 1192 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1193 1194 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1195 return ioctl_func(filp, cmd, arg); 1196 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1197 if (!drm_is_current_master(file_priv) && 1198 !capable(CAP_SYS_ADMIN)) 1199 return -EACCES; 1200 } 1201 1202 if (unlikely(ioctl->cmd != cmd)) 1203 goto out_io_encoding; 1204 1205 flags = ioctl->flags; 1206 } else if (!drm_ioctl_flags(nr, &flags)) 1207 return -EINVAL; 1208 1209 return ioctl_func(filp, cmd, arg); 1210 1211 out_io_encoding: 1212 DRM_ERROR("Invalid command format, ioctl %d\n", 1213 nr - DRM_COMMAND_BASE); 1214 1215 return -EINVAL; 1216 } 1217 1218 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1219 unsigned long arg) 1220 { 1221 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1222 } 1223 1224 #ifdef CONFIG_COMPAT 1225 static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1226 unsigned long arg) 1227 { 1228 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 1229 } 1230 #endif 1231 1232 static void vmw_master_set(struct drm_device *dev, 1233 struct drm_file *file_priv, 1234 bool from_open) 1235 { 1236 /* 1237 * Inform a new master that the layout may have changed while 1238 * it was gone. 1239 */ 1240 if (!from_open) 1241 drm_sysfs_hotplug_event(dev); 1242 } 1243 1244 static void vmw_master_drop(struct drm_device *dev, 1245 struct drm_file *file_priv) 1246 { 1247 struct vmw_private *dev_priv = vmw_priv(dev); 1248 1249 vmw_kms_legacy_hotspot_clear(dev_priv); 1250 if (!dev_priv->enable_fb) 1251 vmw_svga_disable(dev_priv); 1252 } 1253 1254 /** 1255 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1256 * 1257 * @dev_priv: Pointer to device private struct. 1258 * Needs the reservation sem to be held in non-exclusive mode. 1259 */ 1260 static void __vmw_svga_enable(struct vmw_private *dev_priv) 1261 { 1262 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1263 1264 if (!ttm_resource_manager_used(man)) { 1265 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE); 1266 ttm_resource_manager_set_used(man, true); 1267 } 1268 } 1269 1270 /** 1271 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1272 * 1273 * @dev_priv: Pointer to device private struct. 1274 */ 1275 void vmw_svga_enable(struct vmw_private *dev_priv) 1276 { 1277 __vmw_svga_enable(dev_priv); 1278 } 1279 1280 /** 1281 * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1282 * 1283 * @dev_priv: Pointer to device private struct. 1284 * Needs the reservation sem to be held in exclusive mode. 1285 * Will not empty VRAM. VRAM must be emptied by caller. 1286 */ 1287 static void __vmw_svga_disable(struct vmw_private *dev_priv) 1288 { 1289 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1290 1291 if (ttm_resource_manager_used(man)) { 1292 ttm_resource_manager_set_used(man, false); 1293 vmw_write(dev_priv, SVGA_REG_ENABLE, 1294 SVGA_REG_ENABLE_HIDE | 1295 SVGA_REG_ENABLE_ENABLE); 1296 } 1297 } 1298 1299 /** 1300 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1301 * running. 1302 * 1303 * @dev_priv: Pointer to device private struct. 1304 * Will empty VRAM. 1305 */ 1306 void vmw_svga_disable(struct vmw_private *dev_priv) 1307 { 1308 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1309 /* 1310 * Disabling SVGA will turn off device modesetting capabilities, so 1311 * notify KMS about that so that it doesn't cache atomic state that 1312 * isn't valid anymore, for example crtcs turned on. 1313 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), 1314 * but vmw_kms_lost_device() takes the reservation sem and thus we'll 1315 * end up with lock order reversal. Thus, a master may actually perform 1316 * a new modeset just after we call vmw_kms_lost_device() and race with 1317 * vmw_svga_disable(), but that should at worst cause atomic KMS state 1318 * to be inconsistent with the device, causing modesetting problems. 1319 * 1320 */ 1321 vmw_kms_lost_device(&dev_priv->drm); 1322 if (ttm_resource_manager_used(man)) { 1323 if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) 1324 DRM_ERROR("Failed evicting VRAM buffers.\n"); 1325 ttm_resource_manager_set_used(man, false); 1326 vmw_write(dev_priv, SVGA_REG_ENABLE, 1327 SVGA_REG_ENABLE_HIDE | 1328 SVGA_REG_ENABLE_ENABLE); 1329 } 1330 } 1331 1332 static void vmw_remove(struct pci_dev *pdev) 1333 { 1334 struct drm_device *dev = pci_get_drvdata(pdev); 1335 1336 ttm_mem_global_release(&ttm_mem_glob); 1337 drm_dev_unregister(dev); 1338 vmw_driver_unload(dev); 1339 } 1340 1341 static unsigned long 1342 vmw_get_unmapped_area(struct file *file, unsigned long uaddr, 1343 unsigned long len, unsigned long pgoff, 1344 unsigned long flags) 1345 { 1346 struct drm_file *file_priv = file->private_data; 1347 struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); 1348 1349 return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, 1350 &dev_priv->vma_manager); 1351 } 1352 1353 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1354 void *ptr) 1355 { 1356 struct vmw_private *dev_priv = 1357 container_of(nb, struct vmw_private, pm_nb); 1358 1359 switch (val) { 1360 case PM_HIBERNATION_PREPARE: 1361 /* 1362 * Take the reservation sem in write mode, which will make sure 1363 * there are no other processes holding a buffer object 1364 * reservation, meaning we should be able to evict all buffer 1365 * objects if needed. 1366 * Once user-space processes have been frozen, we can release 1367 * the lock again. 1368 */ 1369 dev_priv->suspend_locked = true; 1370 break; 1371 case PM_POST_HIBERNATION: 1372 case PM_POST_RESTORE: 1373 if (READ_ONCE(dev_priv->suspend_locked)) { 1374 dev_priv->suspend_locked = false; 1375 } 1376 break; 1377 default: 1378 break; 1379 } 1380 return 0; 1381 } 1382 1383 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1384 { 1385 struct drm_device *dev = pci_get_drvdata(pdev); 1386 struct vmw_private *dev_priv = vmw_priv(dev); 1387 1388 if (dev_priv->refuse_hibernation) 1389 return -EBUSY; 1390 1391 pci_save_state(pdev); 1392 pci_disable_device(pdev); 1393 pci_set_power_state(pdev, PCI_D3hot); 1394 return 0; 1395 } 1396 1397 static int vmw_pci_resume(struct pci_dev *pdev) 1398 { 1399 pci_set_power_state(pdev, PCI_D0); 1400 pci_restore_state(pdev); 1401 return pci_enable_device(pdev); 1402 } 1403 1404 static int vmw_pm_suspend(struct device *kdev) 1405 { 1406 struct pci_dev *pdev = to_pci_dev(kdev); 1407 struct pm_message dummy; 1408 1409 dummy.event = 0; 1410 1411 return vmw_pci_suspend(pdev, dummy); 1412 } 1413 1414 static int vmw_pm_resume(struct device *kdev) 1415 { 1416 struct pci_dev *pdev = to_pci_dev(kdev); 1417 1418 return vmw_pci_resume(pdev); 1419 } 1420 1421 static int vmw_pm_freeze(struct device *kdev) 1422 { 1423 struct pci_dev *pdev = to_pci_dev(kdev); 1424 struct drm_device *dev = pci_get_drvdata(pdev); 1425 struct vmw_private *dev_priv = vmw_priv(dev); 1426 struct ttm_operation_ctx ctx = { 1427 .interruptible = false, 1428 .no_wait_gpu = false 1429 }; 1430 int ret; 1431 1432 /* 1433 * No user-space processes should be running now. 1434 */ 1435 ret = vmw_kms_suspend(&dev_priv->drm); 1436 if (ret) { 1437 DRM_ERROR("Failed to freeze modesetting.\n"); 1438 return ret; 1439 } 1440 if (dev_priv->enable_fb) 1441 vmw_fb_off(dev_priv); 1442 1443 vmw_execbuf_release_pinned_bo(dev_priv); 1444 vmw_resource_evict_all(dev_priv); 1445 vmw_release_device_early(dev_priv); 1446 while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0); 1447 if (dev_priv->enable_fb) 1448 vmw_fifo_resource_dec(dev_priv); 1449 if (atomic_read(&dev_priv->num_fifo_resources) != 0) { 1450 DRM_ERROR("Can't hibernate while 3D resources are active.\n"); 1451 if (dev_priv->enable_fb) 1452 vmw_fifo_resource_inc(dev_priv); 1453 WARN_ON(vmw_request_device_late(dev_priv)); 1454 dev_priv->suspend_locked = false; 1455 if (dev_priv->suspend_state) 1456 vmw_kms_resume(dev); 1457 if (dev_priv->enable_fb) 1458 vmw_fb_on(dev_priv); 1459 return -EBUSY; 1460 } 1461 1462 vmw_fence_fifo_down(dev_priv->fman); 1463 __vmw_svga_disable(dev_priv); 1464 1465 vmw_release_device_late(dev_priv); 1466 return 0; 1467 } 1468 1469 static int vmw_pm_restore(struct device *kdev) 1470 { 1471 struct pci_dev *pdev = to_pci_dev(kdev); 1472 struct drm_device *dev = pci_get_drvdata(pdev); 1473 struct vmw_private *dev_priv = vmw_priv(dev); 1474 int ret; 1475 1476 vmw_detect_version(dev_priv); 1477 1478 if (dev_priv->enable_fb) 1479 vmw_fifo_resource_inc(dev_priv); 1480 1481 ret = vmw_request_device(dev_priv); 1482 if (ret) 1483 return ret; 1484 1485 if (dev_priv->enable_fb) 1486 __vmw_svga_enable(dev_priv); 1487 1488 vmw_fence_fifo_up(dev_priv->fman); 1489 dev_priv->suspend_locked = false; 1490 if (dev_priv->suspend_state) 1491 vmw_kms_resume(&dev_priv->drm); 1492 1493 if (dev_priv->enable_fb) 1494 vmw_fb_on(dev_priv); 1495 1496 return 0; 1497 } 1498 1499 static const struct dev_pm_ops vmw_pm_ops = { 1500 .freeze = vmw_pm_freeze, 1501 .thaw = vmw_pm_restore, 1502 .restore = vmw_pm_restore, 1503 .suspend = vmw_pm_suspend, 1504 .resume = vmw_pm_resume, 1505 }; 1506 1507 static const struct file_operations vmwgfx_driver_fops = { 1508 .owner = THIS_MODULE, 1509 .open = drm_open, 1510 .release = drm_release, 1511 .unlocked_ioctl = vmw_unlocked_ioctl, 1512 .mmap = vmw_mmap, 1513 .poll = drm_poll, 1514 .read = drm_read, 1515 #if defined(CONFIG_COMPAT) 1516 .compat_ioctl = vmw_compat_ioctl, 1517 #endif 1518 .llseek = noop_llseek, 1519 .get_unmapped_area = vmw_get_unmapped_area, 1520 }; 1521 1522 static const struct drm_driver driver = { 1523 .driver_features = 1524 DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, 1525 .ioctls = vmw_ioctls, 1526 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1527 .master_set = vmw_master_set, 1528 .master_drop = vmw_master_drop, 1529 .open = vmw_driver_open, 1530 .postclose = vmw_postclose, 1531 1532 .dumb_create = vmw_dumb_create, 1533 .dumb_map_offset = vmw_dumb_map_offset, 1534 .dumb_destroy = vmw_dumb_destroy, 1535 1536 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1537 .prime_handle_to_fd = vmw_prime_handle_to_fd, 1538 1539 .fops = &vmwgfx_driver_fops, 1540 .name = VMWGFX_DRIVER_NAME, 1541 .desc = VMWGFX_DRIVER_DESC, 1542 .date = VMWGFX_DRIVER_DATE, 1543 .major = VMWGFX_DRIVER_MAJOR, 1544 .minor = VMWGFX_DRIVER_MINOR, 1545 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL 1546 }; 1547 1548 static struct pci_driver vmw_pci_driver = { 1549 .name = VMWGFX_DRIVER_NAME, 1550 .id_table = vmw_pci_id_list, 1551 .probe = vmw_probe, 1552 .remove = vmw_remove, 1553 .driver = { 1554 .pm = &vmw_pm_ops 1555 } 1556 }; 1557 1558 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1559 { 1560 struct vmw_private *vmw; 1561 int ret; 1562 1563 ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb"); 1564 if (ret) 1565 return ret; 1566 1567 ret = pcim_enable_device(pdev); 1568 if (ret) 1569 return ret; 1570 1571 vmw = devm_drm_dev_alloc(&pdev->dev, &driver, 1572 struct vmw_private, drm); 1573 if (IS_ERR(vmw)) 1574 return PTR_ERR(vmw); 1575 1576 pci_set_drvdata(pdev, &vmw->drm); 1577 1578 ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); 1579 if (ret) 1580 return ret; 1581 1582 ret = vmw_driver_load(vmw, ent->device); 1583 if (ret) 1584 return ret; 1585 1586 ret = drm_dev_register(&vmw->drm, 0); 1587 if (ret) { 1588 vmw_driver_unload(&vmw->drm); 1589 return ret; 1590 } 1591 1592 return 0; 1593 } 1594 1595 static int __init vmwgfx_init(void) 1596 { 1597 int ret; 1598 1599 if (vgacon_text_force()) 1600 return -EINVAL; 1601 1602 ret = pci_register_driver(&vmw_pci_driver); 1603 if (ret) 1604 DRM_ERROR("Failed initializing DRM.\n"); 1605 return ret; 1606 } 1607 1608 static void __exit vmwgfx_exit(void) 1609 { 1610 pci_unregister_driver(&vmw_pci_driver); 1611 } 1612 1613 module_init(vmwgfx_init); 1614 module_exit(vmwgfx_exit); 1615 1616 MODULE_AUTHOR("VMware Inc. and others"); 1617 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); 1618 MODULE_LICENSE("GPL and additional rights"); 1619 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." 1620 __stringify(VMWGFX_DRIVER_MINOR) "." 1621 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 1622 "0"); 1623