1 // SPDX-License-Identifier: GPL-2.0 OR MIT 2 /************************************************************************** 3 * 4 * Copyright 2009-2016 VMware, Inc., Palo Alto, CA., USA 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the 8 * "Software"), to deal in the Software without restriction, including 9 * without limitation the rights to use, copy, modify, merge, publish, 10 * distribute, sub license, and/or sell copies of the Software, and to 11 * permit persons to whom the Software is furnished to do so, subject to 12 * the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the 15 * next paragraph) shall be included in all copies or substantial portions 16 * of the Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 * 26 **************************************************************************/ 27 28 #include <linux/dma-mapping.h> 29 #include <linux/module.h> 30 #include <linux/pci.h> 31 #include <linux/cc_platform.h> 32 33 #include <drm/drm_aperture.h> 34 #include <drm/drm_drv.h> 35 #include <drm/drm_ioctl.h> 36 #include <drm/drm_sysfs.h> 37 #include <drm/ttm/ttm_bo_driver.h> 38 #include <drm/ttm/ttm_range_manager.h> 39 #include <drm/ttm/ttm_placement.h> 40 #include <generated/utsrelease.h> 41 42 #include "ttm_object.h" 43 #include "vmwgfx_binding.h" 44 #include "vmwgfx_devcaps.h" 45 #include "vmwgfx_drv.h" 46 #include "vmwgfx_mksstat.h" 47 48 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 49 50 #define VMW_MIN_INITIAL_WIDTH 800 51 #define VMW_MIN_INITIAL_HEIGHT 600 52 53 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) 54 55 56 /* 57 * Fully encoded drm commands. Might move to vmw_drm.h 58 */ 59 60 #define DRM_IOCTL_VMW_GET_PARAM \ 61 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \ 62 struct drm_vmw_getparam_arg) 63 #define DRM_IOCTL_VMW_ALLOC_DMABUF \ 64 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \ 65 union drm_vmw_alloc_dmabuf_arg) 66 #define DRM_IOCTL_VMW_UNREF_DMABUF \ 67 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \ 68 struct drm_vmw_unref_dmabuf_arg) 69 #define DRM_IOCTL_VMW_CURSOR_BYPASS \ 70 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \ 71 struct drm_vmw_cursor_bypass_arg) 72 73 #define DRM_IOCTL_VMW_CONTROL_STREAM \ 74 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \ 75 struct drm_vmw_control_stream_arg) 76 #define DRM_IOCTL_VMW_CLAIM_STREAM \ 77 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \ 78 struct drm_vmw_stream_arg) 79 #define DRM_IOCTL_VMW_UNREF_STREAM \ 80 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \ 81 struct drm_vmw_stream_arg) 82 83 #define DRM_IOCTL_VMW_CREATE_CONTEXT \ 84 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \ 85 struct drm_vmw_context_arg) 86 #define DRM_IOCTL_VMW_UNREF_CONTEXT \ 87 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \ 88 struct drm_vmw_context_arg) 89 #define DRM_IOCTL_VMW_CREATE_SURFACE \ 90 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \ 91 union drm_vmw_surface_create_arg) 92 #define DRM_IOCTL_VMW_UNREF_SURFACE \ 93 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \ 94 struct drm_vmw_surface_arg) 95 #define DRM_IOCTL_VMW_REF_SURFACE \ 96 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \ 97 union drm_vmw_surface_reference_arg) 98 #define DRM_IOCTL_VMW_EXECBUF \ 99 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \ 100 struct drm_vmw_execbuf_arg) 101 #define DRM_IOCTL_VMW_GET_3D_CAP \ 102 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \ 103 struct drm_vmw_get_3d_cap_arg) 104 #define DRM_IOCTL_VMW_FENCE_WAIT \ 105 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \ 106 struct drm_vmw_fence_wait_arg) 107 #define DRM_IOCTL_VMW_FENCE_SIGNALED \ 108 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \ 109 struct drm_vmw_fence_signaled_arg) 110 #define DRM_IOCTL_VMW_FENCE_UNREF \ 111 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \ 112 struct drm_vmw_fence_arg) 113 #define DRM_IOCTL_VMW_FENCE_EVENT \ 114 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \ 115 struct drm_vmw_fence_event_arg) 116 #define DRM_IOCTL_VMW_PRESENT \ 117 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \ 118 struct drm_vmw_present_arg) 119 #define DRM_IOCTL_VMW_PRESENT_READBACK \ 120 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ 121 struct drm_vmw_present_readback_arg) 122 #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ 123 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ 124 struct drm_vmw_update_layout_arg) 125 #define DRM_IOCTL_VMW_CREATE_SHADER \ 126 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \ 127 struct drm_vmw_shader_create_arg) 128 #define DRM_IOCTL_VMW_UNREF_SHADER \ 129 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \ 130 struct drm_vmw_shader_arg) 131 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE \ 132 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \ 133 union drm_vmw_gb_surface_create_arg) 134 #define DRM_IOCTL_VMW_GB_SURFACE_REF \ 135 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \ 136 union drm_vmw_gb_surface_reference_arg) 137 #define DRM_IOCTL_VMW_SYNCCPU \ 138 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 139 struct drm_vmw_synccpu_arg) 140 #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 141 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 142 struct drm_vmw_context_arg) 143 #define DRM_IOCTL_VMW_GB_SURFACE_CREATE_EXT \ 144 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE_EXT, \ 145 union drm_vmw_gb_surface_create_ext_arg) 146 #define DRM_IOCTL_VMW_GB_SURFACE_REF_EXT \ 147 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF_EXT, \ 148 union drm_vmw_gb_surface_reference_ext_arg) 149 #define DRM_IOCTL_VMW_MSG \ 150 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ 151 struct drm_vmw_msg_arg) 152 #define DRM_IOCTL_VMW_MKSSTAT_RESET \ 153 DRM_IO(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_RESET) 154 #define DRM_IOCTL_VMW_MKSSTAT_ADD \ 155 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_ADD, \ 156 struct drm_vmw_mksstat_add_arg) 157 #define DRM_IOCTL_VMW_MKSSTAT_REMOVE \ 158 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_MKSSTAT_REMOVE, \ 159 struct drm_vmw_mksstat_remove_arg) 160 161 /* 162 * Ioctl definitions. 163 */ 164 165 static const struct drm_ioctl_desc vmw_ioctls[] = { 166 DRM_IOCTL_DEF_DRV(VMW_GET_PARAM, vmw_getparam_ioctl, 167 DRM_RENDER_ALLOW), 168 DRM_IOCTL_DEF_DRV(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl, 169 DRM_RENDER_ALLOW), 170 DRM_IOCTL_DEF_DRV(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl, 171 DRM_RENDER_ALLOW), 172 DRM_IOCTL_DEF_DRV(VMW_CURSOR_BYPASS, 173 vmw_kms_cursor_bypass_ioctl, 174 DRM_MASTER), 175 176 DRM_IOCTL_DEF_DRV(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 177 DRM_MASTER), 178 DRM_IOCTL_DEF_DRV(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 179 DRM_MASTER), 180 DRM_IOCTL_DEF_DRV(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 181 DRM_MASTER), 182 183 DRM_IOCTL_DEF_DRV(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 184 DRM_RENDER_ALLOW), 185 DRM_IOCTL_DEF_DRV(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, 186 DRM_RENDER_ALLOW), 187 DRM_IOCTL_DEF_DRV(VMW_CREATE_SURFACE, vmw_surface_define_ioctl, 188 DRM_RENDER_ALLOW), 189 DRM_IOCTL_DEF_DRV(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, 190 DRM_RENDER_ALLOW), 191 DRM_IOCTL_DEF_DRV(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 192 DRM_RENDER_ALLOW), 193 DRM_IOCTL_DEF_DRV(VMW_EXECBUF, vmw_execbuf_ioctl, 194 DRM_RENDER_ALLOW), 195 DRM_IOCTL_DEF_DRV(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 196 DRM_RENDER_ALLOW), 197 DRM_IOCTL_DEF_DRV(VMW_FENCE_SIGNALED, 198 vmw_fence_obj_signaled_ioctl, 199 DRM_RENDER_ALLOW), 200 DRM_IOCTL_DEF_DRV(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl, 201 DRM_RENDER_ALLOW), 202 DRM_IOCTL_DEF_DRV(VMW_FENCE_EVENT, vmw_fence_event_ioctl, 203 DRM_RENDER_ALLOW), 204 DRM_IOCTL_DEF_DRV(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl, 205 DRM_RENDER_ALLOW), 206 207 /* these allow direct access to the framebuffers mark as master only */ 208 DRM_IOCTL_DEF_DRV(VMW_PRESENT, vmw_present_ioctl, 209 DRM_MASTER | DRM_AUTH), 210 DRM_IOCTL_DEF_DRV(VMW_PRESENT_READBACK, 211 vmw_present_readback_ioctl, 212 DRM_MASTER | DRM_AUTH), 213 /* 214 * The permissions of the below ioctl are overridden in 215 * vmw_generic_ioctl(). We require either 216 * DRM_MASTER or capable(CAP_SYS_ADMIN). 217 */ 218 DRM_IOCTL_DEF_DRV(VMW_UPDATE_LAYOUT, 219 vmw_kms_update_layout_ioctl, 220 DRM_RENDER_ALLOW), 221 DRM_IOCTL_DEF_DRV(VMW_CREATE_SHADER, 222 vmw_shader_define_ioctl, 223 DRM_RENDER_ALLOW), 224 DRM_IOCTL_DEF_DRV(VMW_UNREF_SHADER, 225 vmw_shader_destroy_ioctl, 226 DRM_RENDER_ALLOW), 227 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE, 228 vmw_gb_surface_define_ioctl, 229 DRM_RENDER_ALLOW), 230 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF, 231 vmw_gb_surface_reference_ioctl, 232 DRM_RENDER_ALLOW), 233 DRM_IOCTL_DEF_DRV(VMW_SYNCCPU, 234 vmw_user_bo_synccpu_ioctl, 235 DRM_RENDER_ALLOW), 236 DRM_IOCTL_DEF_DRV(VMW_CREATE_EXTENDED_CONTEXT, 237 vmw_extended_context_define_ioctl, 238 DRM_RENDER_ALLOW), 239 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_CREATE_EXT, 240 vmw_gb_surface_define_ext_ioctl, 241 DRM_RENDER_ALLOW), 242 DRM_IOCTL_DEF_DRV(VMW_GB_SURFACE_REF_EXT, 243 vmw_gb_surface_reference_ext_ioctl, 244 DRM_RENDER_ALLOW), 245 DRM_IOCTL_DEF_DRV(VMW_MSG, 246 vmw_msg_ioctl, 247 DRM_RENDER_ALLOW), 248 DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_RESET, 249 vmw_mksstat_reset_ioctl, 250 DRM_RENDER_ALLOW), 251 DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_ADD, 252 vmw_mksstat_add_ioctl, 253 DRM_RENDER_ALLOW), 254 DRM_IOCTL_DEF_DRV(VMW_MKSSTAT_REMOVE, 255 vmw_mksstat_remove_ioctl, 256 DRM_RENDER_ALLOW), 257 }; 258 259 static const struct pci_device_id vmw_pci_id_list[] = { 260 { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, 261 { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) }, 262 { } 263 }; 264 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); 265 266 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); 267 static int vmw_restrict_iommu; 268 static int vmw_force_coherent; 269 static int vmw_restrict_dma_mask; 270 static int vmw_assume_16bpp; 271 272 static int vmw_probe(struct pci_dev *, const struct pci_device_id *); 273 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 274 void *ptr); 275 276 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); 277 module_param_named(enable_fbdev, enable_fbdev, int, 0600); 278 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages"); 279 module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600); 280 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages"); 281 module_param_named(force_coherent, vmw_force_coherent, int, 0600); 282 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU"); 283 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600); 284 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes"); 285 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600); 286 287 288 struct bitmap_name { 289 uint32 value; 290 const char *name; 291 }; 292 293 static const struct bitmap_name cap1_names[] = { 294 { SVGA_CAP_RECT_COPY, "rect copy" }, 295 { SVGA_CAP_CURSOR, "cursor" }, 296 { SVGA_CAP_CURSOR_BYPASS, "cursor bypass" }, 297 { SVGA_CAP_CURSOR_BYPASS_2, "cursor bypass 2" }, 298 { SVGA_CAP_8BIT_EMULATION, "8bit emulation" }, 299 { SVGA_CAP_ALPHA_CURSOR, "alpha cursor" }, 300 { SVGA_CAP_3D, "3D" }, 301 { SVGA_CAP_EXTENDED_FIFO, "extended fifo" }, 302 { SVGA_CAP_MULTIMON, "multimon" }, 303 { SVGA_CAP_PITCHLOCK, "pitchlock" }, 304 { SVGA_CAP_IRQMASK, "irq mask" }, 305 { SVGA_CAP_DISPLAY_TOPOLOGY, "display topology" }, 306 { SVGA_CAP_GMR, "gmr" }, 307 { SVGA_CAP_TRACES, "traces" }, 308 { SVGA_CAP_GMR2, "gmr2" }, 309 { SVGA_CAP_SCREEN_OBJECT_2, "screen object 2" }, 310 { SVGA_CAP_COMMAND_BUFFERS, "command buffers" }, 311 { SVGA_CAP_CMD_BUFFERS_2, "command buffers 2" }, 312 { SVGA_CAP_GBOBJECTS, "gbobject" }, 313 { SVGA_CAP_DX, "dx" }, 314 { SVGA_CAP_HP_CMD_QUEUE, "hp cmd queue" }, 315 { SVGA_CAP_NO_BB_RESTRICTION, "no bb restriction" }, 316 { SVGA_CAP_CAP2_REGISTER, "cap2 register" }, 317 }; 318 319 320 static const struct bitmap_name cap2_names[] = { 321 { SVGA_CAP2_GROW_OTABLE, "grow otable" }, 322 { SVGA_CAP2_INTRA_SURFACE_COPY, "intra surface copy" }, 323 { SVGA_CAP2_DX2, "dx2" }, 324 { SVGA_CAP2_GB_MEMSIZE_2, "gb memsize 2" }, 325 { SVGA_CAP2_SCREENDMA_REG, "screendma reg" }, 326 { SVGA_CAP2_OTABLE_PTDEPTH_2, "otable ptdepth2" }, 327 { SVGA_CAP2_NON_MS_TO_MS_STRETCHBLT, "non ms to ms stretchblt" }, 328 { SVGA_CAP2_CURSOR_MOB, "cursor mob" }, 329 { SVGA_CAP2_MSHINT, "mshint" }, 330 { SVGA_CAP2_CB_MAX_SIZE_4MB, "cb max size 4mb" }, 331 { SVGA_CAP2_DX3, "dx3" }, 332 { SVGA_CAP2_FRAME_TYPE, "frame type" }, 333 { SVGA_CAP2_COTABLE_COPY, "cotable copy" }, 334 { SVGA_CAP2_TRACE_FULL_FB, "trace full fb" }, 335 { SVGA_CAP2_EXTRA_REGS, "extra regs" }, 336 { SVGA_CAP2_LO_STAGING, "lo staging" }, 337 }; 338 339 static void vmw_print_bitmap(struct drm_device *drm, 340 const char *prefix, uint32_t bitmap, 341 const struct bitmap_name *bnames, 342 uint32_t num_names) 343 { 344 char buf[512]; 345 uint32_t i; 346 uint32_t offset = 0; 347 for (i = 0; i < num_names; ++i) { 348 if ((bitmap & bnames[i].value) != 0) { 349 offset += snprintf(buf + offset, 350 ARRAY_SIZE(buf) - offset, 351 "%s, ", bnames[i].name); 352 bitmap &= ~bnames[i].value; 353 } 354 } 355 356 drm_info(drm, "%s: %s\n", prefix, buf); 357 if (bitmap != 0) 358 drm_dbg(drm, "%s: unknown enums: %x\n", prefix, bitmap); 359 } 360 361 362 static void vmw_print_sm_type(struct vmw_private *dev_priv) 363 { 364 static const char *names[] = { 365 [VMW_SM_LEGACY] = "Legacy", 366 [VMW_SM_4] = "SM4", 367 [VMW_SM_4_1] = "SM4_1", 368 [VMW_SM_5] = "SM_5", 369 [VMW_SM_MAX] = "Invalid" 370 }; 371 BUILD_BUG_ON(ARRAY_SIZE(names) != (VMW_SM_MAX + 1)); 372 drm_info(&dev_priv->drm, "Available shader model: %s.\n", 373 names[dev_priv->sm_type]); 374 } 375 376 /** 377 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result 378 * 379 * @dev_priv: A device private structure. 380 * 381 * This function creates a small buffer object that holds the query 382 * result for dummy queries emitted as query barriers. 383 * The function will then map the first page and initialize a pending 384 * occlusion query result structure, Finally it will unmap the buffer. 385 * No interruptible waits are done within this function. 386 * 387 * Returns an error if bo creation or initialization fails. 388 */ 389 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) 390 { 391 int ret; 392 struct vmw_buffer_object *vbo; 393 struct ttm_bo_kmap_obj map; 394 volatile SVGA3dQueryResult *result; 395 bool dummy; 396 397 /* 398 * Create the vbo as pinned, so that a tryreserve will 399 * immediately succeed. This is because we're the only 400 * user of the bo currently. 401 */ 402 vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); 403 if (!vbo) 404 return -ENOMEM; 405 406 ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE, 407 &vmw_sys_placement, false, true, 408 &vmw_bo_bo_free); 409 if (unlikely(ret != 0)) 410 return ret; 411 412 ret = ttm_bo_reserve(&vbo->base, false, true, NULL); 413 BUG_ON(ret != 0); 414 vmw_bo_pin_reserved(vbo, true); 415 416 ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); 417 if (likely(ret == 0)) { 418 result = ttm_kmap_obj_virtual(&map, &dummy); 419 result->totalSize = sizeof(*result); 420 result->state = SVGA3D_QUERYSTATE_PENDING; 421 result->result32 = 0xff; 422 ttm_bo_kunmap(&map); 423 } 424 vmw_bo_pin_reserved(vbo, false); 425 ttm_bo_unreserve(&vbo->base); 426 427 if (unlikely(ret != 0)) { 428 DRM_ERROR("Dummy query buffer map failed.\n"); 429 vmw_bo_unreference(&vbo); 430 } else 431 dev_priv->dummy_query_bo = vbo; 432 433 return ret; 434 } 435 436 static int vmw_device_init(struct vmw_private *dev_priv) 437 { 438 bool uses_fb_traces = false; 439 440 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 441 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 442 dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 443 444 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | 445 SVGA_REG_ENABLE_HIDE); 446 447 uses_fb_traces = !vmw_cmd_supported(dev_priv) && 448 (dev_priv->capabilities & SVGA_CAP_TRACES) != 0; 449 450 vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces); 451 dev_priv->fifo = vmw_fifo_create(dev_priv); 452 if (IS_ERR(dev_priv->fifo)) { 453 int err = PTR_ERR(dev_priv->fifo); 454 dev_priv->fifo = NULL; 455 return err; 456 } else if (!dev_priv->fifo) { 457 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 458 } 459 460 dev_priv->last_read_seqno = vmw_fence_read(dev_priv); 461 atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 462 return 0; 463 } 464 465 static void vmw_device_fini(struct vmw_private *vmw) 466 { 467 /* 468 * Legacy sync 469 */ 470 vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 471 while (vmw_read(vmw, SVGA_REG_BUSY) != 0) 472 ; 473 474 vmw->last_read_seqno = vmw_fence_read(vmw); 475 476 vmw_write(vmw, SVGA_REG_CONFIG_DONE, 477 vmw->config_done_state); 478 vmw_write(vmw, SVGA_REG_ENABLE, 479 vmw->enable_state); 480 vmw_write(vmw, SVGA_REG_TRACES, 481 vmw->traces_state); 482 483 vmw_fifo_destroy(vmw); 484 } 485 486 /** 487 * vmw_request_device_late - Perform late device setup 488 * 489 * @dev_priv: Pointer to device private. 490 * 491 * This function performs setup of otables and enables large command 492 * buffer submission. These tasks are split out to a separate function 493 * because it reverts vmw_release_device_early and is intended to be used 494 * by an error path in the hibernation code. 495 */ 496 static int vmw_request_device_late(struct vmw_private *dev_priv) 497 { 498 int ret; 499 500 if (dev_priv->has_mob) { 501 ret = vmw_otables_setup(dev_priv); 502 if (unlikely(ret != 0)) { 503 DRM_ERROR("Unable to initialize " 504 "guest Memory OBjects.\n"); 505 return ret; 506 } 507 } 508 509 if (dev_priv->cman) { 510 ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096); 511 if (ret) { 512 struct vmw_cmdbuf_man *man = dev_priv->cman; 513 514 dev_priv->cman = NULL; 515 vmw_cmdbuf_man_destroy(man); 516 } 517 } 518 519 return 0; 520 } 521 522 static int vmw_request_device(struct vmw_private *dev_priv) 523 { 524 int ret; 525 526 ret = vmw_device_init(dev_priv); 527 if (unlikely(ret != 0)) { 528 DRM_ERROR("Unable to initialize the device.\n"); 529 return ret; 530 } 531 vmw_fence_fifo_up(dev_priv->fman); 532 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 533 if (IS_ERR(dev_priv->cman)) { 534 dev_priv->cman = NULL; 535 dev_priv->sm_type = VMW_SM_LEGACY; 536 } 537 538 ret = vmw_request_device_late(dev_priv); 539 if (ret) 540 goto out_no_mob; 541 542 ret = vmw_dummy_query_bo_create(dev_priv); 543 if (unlikely(ret != 0)) 544 goto out_no_query_bo; 545 546 return 0; 547 548 out_no_query_bo: 549 if (dev_priv->cman) 550 vmw_cmdbuf_remove_pool(dev_priv->cman); 551 if (dev_priv->has_mob) { 552 struct ttm_resource_manager *man; 553 554 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 555 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 556 vmw_otables_takedown(dev_priv); 557 } 558 if (dev_priv->cman) 559 vmw_cmdbuf_man_destroy(dev_priv->cman); 560 out_no_mob: 561 vmw_fence_fifo_down(dev_priv->fman); 562 vmw_device_fini(dev_priv); 563 return ret; 564 } 565 566 /** 567 * vmw_release_device_early - Early part of fifo takedown. 568 * 569 * @dev_priv: Pointer to device private struct. 570 * 571 * This is the first part of command submission takedown, to be called before 572 * buffer management is taken down. 573 */ 574 static void vmw_release_device_early(struct vmw_private *dev_priv) 575 { 576 /* 577 * Previous destructions should've released 578 * the pinned bo. 579 */ 580 581 BUG_ON(dev_priv->pinned_bo != NULL); 582 583 vmw_bo_unreference(&dev_priv->dummy_query_bo); 584 if (dev_priv->cman) 585 vmw_cmdbuf_remove_pool(dev_priv->cman); 586 587 if (dev_priv->has_mob) { 588 struct ttm_resource_manager *man; 589 590 man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); 591 ttm_resource_manager_evict_all(&dev_priv->bdev, man); 592 vmw_otables_takedown(dev_priv); 593 } 594 } 595 596 /** 597 * vmw_release_device_late - Late part of fifo takedown. 598 * 599 * @dev_priv: Pointer to device private struct. 600 * 601 * This is the last part of the command submission takedown, to be called when 602 * command submission is no longer needed. It may wait on pending fences. 603 */ 604 static void vmw_release_device_late(struct vmw_private *dev_priv) 605 { 606 vmw_fence_fifo_down(dev_priv->fman); 607 if (dev_priv->cman) 608 vmw_cmdbuf_man_destroy(dev_priv->cman); 609 610 vmw_device_fini(dev_priv); 611 } 612 613 /* 614 * Sets the initial_[width|height] fields on the given vmw_private. 615 * 616 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then 617 * clamping the value to fb_max_[width|height] fields and the 618 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 619 * If the values appear to be invalid, set them to 620 * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. 621 */ 622 static void vmw_get_initial_size(struct vmw_private *dev_priv) 623 { 624 uint32_t width; 625 uint32_t height; 626 627 width = vmw_read(dev_priv, SVGA_REG_WIDTH); 628 height = vmw_read(dev_priv, SVGA_REG_HEIGHT); 629 630 width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH); 631 height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT); 632 633 if (width > dev_priv->fb_max_width || 634 height > dev_priv->fb_max_height) { 635 636 /* 637 * This is a host error and shouldn't occur. 638 */ 639 640 width = VMW_MIN_INITIAL_WIDTH; 641 height = VMW_MIN_INITIAL_HEIGHT; 642 } 643 644 dev_priv->initial_width = width; 645 dev_priv->initial_height = height; 646 } 647 648 /** 649 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 650 * system. 651 * 652 * @dev_priv: Pointer to a struct vmw_private 653 * 654 * This functions tries to determine what actions need to be taken by the 655 * driver to make system pages visible to the device. 656 * If this function decides that DMA is not possible, it returns -EINVAL. 657 * The driver may then try to disable features of the device that require 658 * DMA. 659 */ 660 static int vmw_dma_select_mode(struct vmw_private *dev_priv) 661 { 662 static const char *names[vmw_dma_map_max] = { 663 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 664 [vmw_dma_map_populate] = "Caching DMA mappings.", 665 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 666 667 /* TTM currently doesn't fully support SEV encryption. */ 668 if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) 669 return -EINVAL; 670 671 if (vmw_force_coherent) 672 dev_priv->map_mode = vmw_dma_alloc_coherent; 673 else if (vmw_restrict_iommu) 674 dev_priv->map_mode = vmw_dma_map_bind; 675 else 676 dev_priv->map_mode = vmw_dma_map_populate; 677 678 drm_info(&dev_priv->drm, 679 "DMA map mode: %s\n", names[dev_priv->map_mode]); 680 return 0; 681 } 682 683 /** 684 * vmw_dma_masks - set required page- and dma masks 685 * 686 * @dev_priv: Pointer to struct drm-device 687 * 688 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 689 * restriction also for 64-bit systems. 690 */ 691 static int vmw_dma_masks(struct vmw_private *dev_priv) 692 { 693 struct drm_device *dev = &dev_priv->drm; 694 int ret = 0; 695 696 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); 697 if (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask) { 698 drm_info(&dev_priv->drm, 699 "Restricting DMA addresses to 44 bits.\n"); 700 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); 701 } 702 703 return ret; 704 } 705 706 static int vmw_vram_manager_init(struct vmw_private *dev_priv) 707 { 708 int ret; 709 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 710 ret = vmw_thp_init(dev_priv); 711 #else 712 ret = ttm_range_man_init(&dev_priv->bdev, TTM_PL_VRAM, false, 713 dev_priv->vram_size >> PAGE_SHIFT); 714 #endif 715 ttm_resource_manager_set_used(ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM), false); 716 return ret; 717 } 718 719 static void vmw_vram_manager_fini(struct vmw_private *dev_priv) 720 { 721 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 722 vmw_thp_fini(dev_priv); 723 #else 724 ttm_range_man_fini(&dev_priv->bdev, TTM_PL_VRAM); 725 #endif 726 } 727 728 static int vmw_setup_pci_resources(struct vmw_private *dev, 729 u32 pci_id) 730 { 731 resource_size_t rmmio_start; 732 resource_size_t rmmio_size; 733 resource_size_t fifo_start; 734 resource_size_t fifo_size; 735 int ret; 736 struct pci_dev *pdev = to_pci_dev(dev->drm.dev); 737 738 pci_set_master(pdev); 739 740 ret = pci_request_regions(pdev, "vmwgfx probe"); 741 if (ret) 742 return ret; 743 744 dev->pci_id = pci_id; 745 if (pci_id == VMWGFX_PCI_ID_SVGA3) { 746 rmmio_start = pci_resource_start(pdev, 0); 747 rmmio_size = pci_resource_len(pdev, 0); 748 dev->vram_start = pci_resource_start(pdev, 2); 749 dev->vram_size = pci_resource_len(pdev, 2); 750 751 drm_info(&dev->drm, 752 "Register MMIO at 0x%pa size is %llu kiB\n", 753 &rmmio_start, (uint64_t)rmmio_size / 1024); 754 dev->rmmio = devm_ioremap(dev->drm.dev, 755 rmmio_start, 756 rmmio_size); 757 if (!dev->rmmio) { 758 drm_err(&dev->drm, 759 "Failed mapping registers mmio memory.\n"); 760 pci_release_regions(pdev); 761 return -ENOMEM; 762 } 763 } else if (pci_id == VMWGFX_PCI_ID_SVGA2) { 764 dev->io_start = pci_resource_start(pdev, 0); 765 dev->vram_start = pci_resource_start(pdev, 1); 766 dev->vram_size = pci_resource_len(pdev, 1); 767 fifo_start = pci_resource_start(pdev, 2); 768 fifo_size = pci_resource_len(pdev, 2); 769 770 drm_info(&dev->drm, 771 "FIFO at %pa size is %llu kiB\n", 772 &fifo_start, (uint64_t)fifo_size / 1024); 773 dev->fifo_mem = devm_memremap(dev->drm.dev, 774 fifo_start, 775 fifo_size, 776 MEMREMAP_WB); 777 778 if (IS_ERR(dev->fifo_mem)) { 779 drm_err(&dev->drm, 780 "Failed mapping FIFO memory.\n"); 781 pci_release_regions(pdev); 782 return PTR_ERR(dev->fifo_mem); 783 } 784 } else { 785 pci_release_regions(pdev); 786 return -EINVAL; 787 } 788 789 /* 790 * This is approximate size of the vram, the exact size will only 791 * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource 792 * size will be equal to or bigger than the size reported by 793 * SVGA_REG_VRAM_SIZE. 794 */ 795 drm_info(&dev->drm, 796 "VRAM at %pa size is %llu kiB\n", 797 &dev->vram_start, (uint64_t)dev->vram_size / 1024); 798 799 return 0; 800 } 801 802 static int vmw_detect_version(struct vmw_private *dev) 803 { 804 uint32_t svga_id; 805 806 vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ? 807 SVGA_ID_3 : SVGA_ID_2); 808 svga_id = vmw_read(dev, SVGA_REG_ID); 809 if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) { 810 drm_err(&dev->drm, 811 "Unsupported SVGA ID 0x%x on chipset 0x%x\n", 812 svga_id, dev->pci_id); 813 return -ENOSYS; 814 } 815 BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3)); 816 drm_info(&dev->drm, 817 "Running on SVGA version %d.\n", (svga_id & 0xff)); 818 return 0; 819 } 820 821 static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) 822 { 823 int ret; 824 enum vmw_res_type i; 825 bool refuse_dma = false; 826 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 827 828 dev_priv->drm.dev_private = dev_priv; 829 830 mutex_init(&dev_priv->cmdbuf_mutex); 831 mutex_init(&dev_priv->binding_mutex); 832 spin_lock_init(&dev_priv->resource_lock); 833 spin_lock_init(&dev_priv->hw_lock); 834 spin_lock_init(&dev_priv->waiter_lock); 835 spin_lock_init(&dev_priv->cursor_lock); 836 837 ret = vmw_setup_pci_resources(dev_priv, pci_id); 838 if (ret) 839 return ret; 840 ret = vmw_detect_version(dev_priv); 841 if (ret) 842 goto out_no_pci_or_version; 843 844 845 for (i = vmw_res_context; i < vmw_res_max; ++i) { 846 idr_init_base(&dev_priv->res_idr[i], 1); 847 INIT_LIST_HEAD(&dev_priv->res_lru[i]); 848 } 849 850 init_waitqueue_head(&dev_priv->fence_queue); 851 init_waitqueue_head(&dev_priv->fifo_queue); 852 dev_priv->fence_queue_waiters = 0; 853 dev_priv->fifo_queue_waiters = 0; 854 855 dev_priv->used_memory_size = 0; 856 857 dev_priv->assume_16bpp = !!vmw_assume_16bpp; 858 859 dev_priv->enable_fb = enable_fbdev; 860 861 862 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); 863 864 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) { 865 dev_priv->capabilities2 = vmw_read(dev_priv, SVGA_REG_CAP2); 866 } 867 868 869 ret = vmw_dma_select_mode(dev_priv); 870 if (unlikely(ret != 0)) { 871 drm_info(&dev_priv->drm, 872 "Restricting capabilities since DMA not available.\n"); 873 refuse_dma = true; 874 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) 875 drm_info(&dev_priv->drm, 876 "Disabling 3D acceleration.\n"); 877 } 878 879 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); 880 dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); 881 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); 882 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); 883 884 vmw_get_initial_size(dev_priv); 885 886 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 887 dev_priv->max_gmr_ids = 888 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS); 889 dev_priv->max_gmr_pages = 890 vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES); 891 dev_priv->memory_size = 892 vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE); 893 dev_priv->memory_size -= dev_priv->vram_size; 894 } else { 895 /* 896 * An arbitrary limit of 512MiB on surface 897 * memory. But all HWV8 hardware supports GMR2. 898 */ 899 dev_priv->memory_size = 512*1024*1024; 900 } 901 dev_priv->max_mob_pages = 0; 902 dev_priv->max_mob_size = 0; 903 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { 904 uint64_t mem_size; 905 906 if (dev_priv->capabilities2 & SVGA_CAP2_GB_MEMSIZE_2) 907 mem_size = vmw_read(dev_priv, 908 SVGA_REG_GBOBJECT_MEM_SIZE_KB); 909 else 910 mem_size = 911 vmw_read(dev_priv, 912 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB); 913 914 /* 915 * Workaround for low memory 2D VMs to compensate for the 916 * allocation taken by fbdev 917 */ 918 if (!(dev_priv->capabilities & SVGA_CAP_3D)) 919 mem_size *= 3; 920 921 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE; 922 dev_priv->max_primary_mem = 923 vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM); 924 dev_priv->max_mob_size = 925 vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); 926 dev_priv->stdu_max_width = 927 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH); 928 dev_priv->stdu_max_height = 929 vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT); 930 931 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 932 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH); 933 dev_priv->texture_max_width = vmw_read(dev_priv, 934 SVGA_REG_DEV_CAP); 935 vmw_write(dev_priv, SVGA_REG_DEV_CAP, 936 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT); 937 dev_priv->texture_max_height = vmw_read(dev_priv, 938 SVGA_REG_DEV_CAP); 939 } else { 940 dev_priv->texture_max_width = 8192; 941 dev_priv->texture_max_height = 8192; 942 dev_priv->max_primary_mem = dev_priv->vram_size; 943 } 944 drm_info(&dev_priv->drm, 945 "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n", 946 (u64)dev_priv->vram_size / 1024, 947 (u64)dev_priv->fifo_mem_size / 1024, 948 dev_priv->memory_size / 1024); 949 950 drm_info(&dev_priv->drm, 951 "MOB limits: max mob size = %u kB, max mob pages = %u\n", 952 dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages); 953 954 vmw_print_bitmap(&dev_priv->drm, "Capabilities", 955 dev_priv->capabilities, 956 cap1_names, ARRAY_SIZE(cap1_names)); 957 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) 958 vmw_print_bitmap(&dev_priv->drm, "Capabilities2", 959 dev_priv->capabilities2, 960 cap2_names, ARRAY_SIZE(cap2_names)); 961 962 ret = vmw_dma_masks(dev_priv); 963 if (unlikely(ret != 0)) 964 goto out_err0; 965 966 dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX); 967 968 if (dev_priv->capabilities & SVGA_CAP_GMR2) { 969 drm_info(&dev_priv->drm, 970 "Max GMR ids is %u\n", 971 (unsigned)dev_priv->max_gmr_ids); 972 drm_info(&dev_priv->drm, 973 "Max number of GMR pages is %u\n", 974 (unsigned)dev_priv->max_gmr_pages); 975 } 976 drm_info(&dev_priv->drm, 977 "Maximum display memory size is %llu kiB\n", 978 (uint64_t)dev_priv->max_primary_mem / 1024); 979 980 /* Need mmio memory to check for fifo pitchlock cap. */ 981 if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && 982 !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) && 983 !vmw_fifo_have_pitchlock(dev_priv)) { 984 ret = -ENOSYS; 985 DRM_ERROR("Hardware has no pitchlock\n"); 986 goto out_err0; 987 } 988 989 dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, 990 &vmw_prime_dmabuf_ops); 991 992 if (unlikely(dev_priv->tdev == NULL)) { 993 drm_err(&dev_priv->drm, 994 "Unable to initialize TTM object management.\n"); 995 ret = -ENOMEM; 996 goto out_err0; 997 } 998 999 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { 1000 ret = vmw_irq_install(&dev_priv->drm, pdev->irq); 1001 if (ret != 0) { 1002 drm_err(&dev_priv->drm, 1003 "Failed installing irq: %d\n", ret); 1004 goto out_no_irq; 1005 } 1006 } 1007 1008 dev_priv->fman = vmw_fence_manager_init(dev_priv); 1009 if (unlikely(dev_priv->fman == NULL)) { 1010 ret = -ENOMEM; 1011 goto out_no_fman; 1012 } 1013 1014 drm_vma_offset_manager_init(&dev_priv->vma_manager, 1015 DRM_FILE_PAGE_OFFSET_START, 1016 DRM_FILE_PAGE_OFFSET_SIZE); 1017 ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, 1018 dev_priv->drm.dev, 1019 dev_priv->drm.anon_inode->i_mapping, 1020 &dev_priv->vma_manager, 1021 dev_priv->map_mode == vmw_dma_alloc_coherent, 1022 false); 1023 if (unlikely(ret != 0)) { 1024 drm_err(&dev_priv->drm, 1025 "Failed initializing TTM buffer object driver.\n"); 1026 goto out_no_bdev; 1027 } 1028 1029 /* 1030 * Enable VRAM, but initially don't use it until SVGA is enabled and 1031 * unhidden. 1032 */ 1033 1034 ret = vmw_vram_manager_init(dev_priv); 1035 if (unlikely(ret != 0)) { 1036 drm_err(&dev_priv->drm, 1037 "Failed initializing memory manager for VRAM.\n"); 1038 goto out_no_vram; 1039 } 1040 1041 ret = vmw_devcaps_create(dev_priv); 1042 if (unlikely(ret != 0)) { 1043 drm_err(&dev_priv->drm, 1044 "Failed initializing device caps.\n"); 1045 goto out_no_vram; 1046 } 1047 1048 /* 1049 * "Guest Memory Regions" is an aperture like feature with 1050 * one slot per bo. There is an upper limit of the number of 1051 * slots as well as the bo size. 1052 */ 1053 dev_priv->has_gmr = true; 1054 /* TODO: This is most likely not correct */ 1055 if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || 1056 refuse_dma || 1057 vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) { 1058 drm_info(&dev_priv->drm, 1059 "No GMR memory available. " 1060 "Graphics memory resources are very limited.\n"); 1061 dev_priv->has_gmr = false; 1062 } 1063 1064 if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) { 1065 dev_priv->has_mob = true; 1066 1067 if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) { 1068 drm_info(&dev_priv->drm, 1069 "No MOB memory available. " 1070 "3D will be disabled.\n"); 1071 dev_priv->has_mob = false; 1072 } 1073 if (vmw_sys_man_init(dev_priv) != 0) { 1074 drm_info(&dev_priv->drm, 1075 "No MOB page table memory available. " 1076 "3D will be disabled.\n"); 1077 dev_priv->has_mob = false; 1078 } 1079 } 1080 1081 if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { 1082 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT)) 1083 dev_priv->sm_type = VMW_SM_4; 1084 } 1085 1086 vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN); 1087 1088 /* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ 1089 if (has_sm4_context(dev_priv) && 1090 (dev_priv->capabilities2 & SVGA_CAP2_DX2)) { 1091 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41)) 1092 dev_priv->sm_type = VMW_SM_4_1; 1093 if (has_sm4_1_context(dev_priv) && 1094 (dev_priv->capabilities2 & SVGA_CAP2_DX3)) { 1095 if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) 1096 dev_priv->sm_type = VMW_SM_5; 1097 } 1098 } 1099 1100 ret = vmw_kms_init(dev_priv); 1101 if (unlikely(ret != 0)) 1102 goto out_no_kms; 1103 vmw_overlay_init(dev_priv); 1104 1105 ret = vmw_request_device(dev_priv); 1106 if (ret) 1107 goto out_no_fifo; 1108 1109 vmw_print_sm_type(dev_priv); 1110 vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)", 1111 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, 1112 VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE); 1113 1114 if (dev_priv->enable_fb) { 1115 vmw_fifo_resource_inc(dev_priv); 1116 vmw_svga_enable(dev_priv); 1117 vmw_fb_init(dev_priv); 1118 } 1119 1120 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 1121 register_pm_notifier(&dev_priv->pm_nb); 1122 1123 return 0; 1124 1125 out_no_fifo: 1126 vmw_overlay_close(dev_priv); 1127 vmw_kms_close(dev_priv); 1128 out_no_kms: 1129 if (dev_priv->has_mob) { 1130 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1131 vmw_sys_man_fini(dev_priv); 1132 } 1133 if (dev_priv->has_gmr) 1134 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1135 vmw_devcaps_destroy(dev_priv); 1136 vmw_vram_manager_fini(dev_priv); 1137 out_no_vram: 1138 ttm_device_fini(&dev_priv->bdev); 1139 out_no_bdev: 1140 vmw_fence_manager_takedown(dev_priv->fman); 1141 out_no_fman: 1142 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1143 vmw_irq_uninstall(&dev_priv->drm); 1144 out_no_irq: 1145 ttm_object_device_release(&dev_priv->tdev); 1146 out_err0: 1147 for (i = vmw_res_context; i < vmw_res_max; ++i) 1148 idr_destroy(&dev_priv->res_idr[i]); 1149 1150 if (dev_priv->ctx.staged_bindings) 1151 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1152 out_no_pci_or_version: 1153 pci_release_regions(pdev); 1154 return ret; 1155 } 1156 1157 static void vmw_driver_unload(struct drm_device *dev) 1158 { 1159 struct vmw_private *dev_priv = vmw_priv(dev); 1160 struct pci_dev *pdev = to_pci_dev(dev->dev); 1161 enum vmw_res_type i; 1162 1163 unregister_pm_notifier(&dev_priv->pm_nb); 1164 1165 if (dev_priv->ctx.res_ht_initialized) 1166 vmwgfx_ht_remove(&dev_priv->ctx.res_ht); 1167 vfree(dev_priv->ctx.cmd_bounce); 1168 if (dev_priv->enable_fb) { 1169 vmw_fb_off(dev_priv); 1170 vmw_fb_close(dev_priv); 1171 vmw_fifo_resource_dec(dev_priv); 1172 vmw_svga_disable(dev_priv); 1173 } 1174 1175 vmw_kms_close(dev_priv); 1176 vmw_overlay_close(dev_priv); 1177 1178 if (dev_priv->has_gmr) 1179 vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); 1180 1181 vmw_release_device_early(dev_priv); 1182 if (dev_priv->has_mob) { 1183 vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); 1184 vmw_sys_man_fini(dev_priv); 1185 } 1186 vmw_devcaps_destroy(dev_priv); 1187 vmw_vram_manager_fini(dev_priv); 1188 ttm_device_fini(&dev_priv->bdev); 1189 drm_vma_offset_manager_destroy(&dev_priv->vma_manager); 1190 vmw_release_device_late(dev_priv); 1191 vmw_fence_manager_takedown(dev_priv->fman); 1192 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) 1193 vmw_irq_uninstall(&dev_priv->drm); 1194 1195 ttm_object_device_release(&dev_priv->tdev); 1196 if (dev_priv->ctx.staged_bindings) 1197 vmw_binding_state_free(dev_priv->ctx.staged_bindings); 1198 1199 for (i = vmw_res_context; i < vmw_res_max; ++i) 1200 idr_destroy(&dev_priv->res_idr[i]); 1201 1202 vmw_mksstat_remove_all(dev_priv); 1203 1204 pci_release_regions(pdev); 1205 } 1206 1207 static void vmw_postclose(struct drm_device *dev, 1208 struct drm_file *file_priv) 1209 { 1210 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1211 1212 ttm_object_file_release(&vmw_fp->tfile); 1213 kfree(vmw_fp); 1214 } 1215 1216 static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv) 1217 { 1218 struct vmw_private *dev_priv = vmw_priv(dev); 1219 struct vmw_fpriv *vmw_fp; 1220 int ret = -ENOMEM; 1221 1222 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1223 if (unlikely(!vmw_fp)) 1224 return ret; 1225 1226 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); 1227 if (unlikely(vmw_fp->tfile == NULL)) 1228 goto out_no_tfile; 1229 1230 file_priv->driver_priv = vmw_fp; 1231 1232 return 0; 1233 1234 out_no_tfile: 1235 kfree(vmw_fp); 1236 return ret; 1237 } 1238 1239 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, 1240 unsigned long arg, 1241 long (*ioctl_func)(struct file *, unsigned int, 1242 unsigned long)) 1243 { 1244 struct drm_file *file_priv = filp->private_data; 1245 struct drm_device *dev = file_priv->minor->dev; 1246 unsigned int nr = DRM_IOCTL_NR(cmd); 1247 unsigned int flags; 1248 1249 /* 1250 * Do extra checking on driver private ioctls. 1251 */ 1252 1253 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) 1254 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) { 1255 const struct drm_ioctl_desc *ioctl = 1256 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1257 1258 if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1259 return ioctl_func(filp, cmd, arg); 1260 } else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) { 1261 if (!drm_is_current_master(file_priv) && 1262 !capable(CAP_SYS_ADMIN)) 1263 return -EACCES; 1264 } 1265 1266 if (unlikely(ioctl->cmd != cmd)) 1267 goto out_io_encoding; 1268 1269 flags = ioctl->flags; 1270 } else if (!drm_ioctl_flags(nr, &flags)) 1271 return -EINVAL; 1272 1273 return ioctl_func(filp, cmd, arg); 1274 1275 out_io_encoding: 1276 DRM_ERROR("Invalid command format, ioctl %d\n", 1277 nr - DRM_COMMAND_BASE); 1278 1279 return -EINVAL; 1280 } 1281 1282 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, 1283 unsigned long arg) 1284 { 1285 return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl); 1286 } 1287 1288 #ifdef CONFIG_COMPAT 1289 static long vmw_compat_ioctl(struct file *filp, unsigned int cmd, 1290 unsigned long arg) 1291 { 1292 return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl); 1293 } 1294 #endif 1295 1296 static void vmw_master_set(struct drm_device *dev, 1297 struct drm_file *file_priv, 1298 bool from_open) 1299 { 1300 /* 1301 * Inform a new master that the layout may have changed while 1302 * it was gone. 1303 */ 1304 if (!from_open) 1305 drm_sysfs_hotplug_event(dev); 1306 } 1307 1308 static void vmw_master_drop(struct drm_device *dev, 1309 struct drm_file *file_priv) 1310 { 1311 struct vmw_private *dev_priv = vmw_priv(dev); 1312 1313 vmw_kms_legacy_hotspot_clear(dev_priv); 1314 if (!dev_priv->enable_fb) 1315 vmw_svga_disable(dev_priv); 1316 } 1317 1318 /** 1319 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1320 * 1321 * @dev_priv: Pointer to device private struct. 1322 * Needs the reservation sem to be held in non-exclusive mode. 1323 */ 1324 static void __vmw_svga_enable(struct vmw_private *dev_priv) 1325 { 1326 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1327 1328 if (!ttm_resource_manager_used(man)) { 1329 vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE); 1330 ttm_resource_manager_set_used(man, true); 1331 } 1332 } 1333 1334 /** 1335 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM. 1336 * 1337 * @dev_priv: Pointer to device private struct. 1338 */ 1339 void vmw_svga_enable(struct vmw_private *dev_priv) 1340 { 1341 __vmw_svga_enable(dev_priv); 1342 } 1343 1344 /** 1345 * __vmw_svga_disable - Disable SVGA mode and use of VRAM. 1346 * 1347 * @dev_priv: Pointer to device private struct. 1348 * Needs the reservation sem to be held in exclusive mode. 1349 * Will not empty VRAM. VRAM must be emptied by caller. 1350 */ 1351 static void __vmw_svga_disable(struct vmw_private *dev_priv) 1352 { 1353 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1354 1355 if (ttm_resource_manager_used(man)) { 1356 ttm_resource_manager_set_used(man, false); 1357 vmw_write(dev_priv, SVGA_REG_ENABLE, 1358 SVGA_REG_ENABLE_HIDE | 1359 SVGA_REG_ENABLE_ENABLE); 1360 } 1361 } 1362 1363 /** 1364 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo 1365 * running. 1366 * 1367 * @dev_priv: Pointer to device private struct. 1368 * Will empty VRAM. 1369 */ 1370 void vmw_svga_disable(struct vmw_private *dev_priv) 1371 { 1372 struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); 1373 /* 1374 * Disabling SVGA will turn off device modesetting capabilities, so 1375 * notify KMS about that so that it doesn't cache atomic state that 1376 * isn't valid anymore, for example crtcs turned on. 1377 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex), 1378 * but vmw_kms_lost_device() takes the reservation sem and thus we'll 1379 * end up with lock order reversal. Thus, a master may actually perform 1380 * a new modeset just after we call vmw_kms_lost_device() and race with 1381 * vmw_svga_disable(), but that should at worst cause atomic KMS state 1382 * to be inconsistent with the device, causing modesetting problems. 1383 * 1384 */ 1385 vmw_kms_lost_device(&dev_priv->drm); 1386 if (ttm_resource_manager_used(man)) { 1387 if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) 1388 DRM_ERROR("Failed evicting VRAM buffers.\n"); 1389 ttm_resource_manager_set_used(man, false); 1390 vmw_write(dev_priv, SVGA_REG_ENABLE, 1391 SVGA_REG_ENABLE_HIDE | 1392 SVGA_REG_ENABLE_ENABLE); 1393 } 1394 } 1395 1396 static void vmw_remove(struct pci_dev *pdev) 1397 { 1398 struct drm_device *dev = pci_get_drvdata(pdev); 1399 1400 ttm_mem_global_release(&ttm_mem_glob); 1401 drm_dev_unregister(dev); 1402 vmw_driver_unload(dev); 1403 } 1404 1405 static unsigned long 1406 vmw_get_unmapped_area(struct file *file, unsigned long uaddr, 1407 unsigned long len, unsigned long pgoff, 1408 unsigned long flags) 1409 { 1410 struct drm_file *file_priv = file->private_data; 1411 struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); 1412 1413 return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, 1414 &dev_priv->vma_manager); 1415 } 1416 1417 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, 1418 void *ptr) 1419 { 1420 struct vmw_private *dev_priv = 1421 container_of(nb, struct vmw_private, pm_nb); 1422 1423 switch (val) { 1424 case PM_HIBERNATION_PREPARE: 1425 /* 1426 * Take the reservation sem in write mode, which will make sure 1427 * there are no other processes holding a buffer object 1428 * reservation, meaning we should be able to evict all buffer 1429 * objects if needed. 1430 * Once user-space processes have been frozen, we can release 1431 * the lock again. 1432 */ 1433 dev_priv->suspend_locked = true; 1434 break; 1435 case PM_POST_HIBERNATION: 1436 case PM_POST_RESTORE: 1437 if (READ_ONCE(dev_priv->suspend_locked)) { 1438 dev_priv->suspend_locked = false; 1439 } 1440 break; 1441 default: 1442 break; 1443 } 1444 return 0; 1445 } 1446 1447 static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1448 { 1449 struct drm_device *dev = pci_get_drvdata(pdev); 1450 struct vmw_private *dev_priv = vmw_priv(dev); 1451 1452 if (dev_priv->refuse_hibernation) 1453 return -EBUSY; 1454 1455 pci_save_state(pdev); 1456 pci_disable_device(pdev); 1457 pci_set_power_state(pdev, PCI_D3hot); 1458 return 0; 1459 } 1460 1461 static int vmw_pci_resume(struct pci_dev *pdev) 1462 { 1463 pci_set_power_state(pdev, PCI_D0); 1464 pci_restore_state(pdev); 1465 return pci_enable_device(pdev); 1466 } 1467 1468 static int vmw_pm_suspend(struct device *kdev) 1469 { 1470 struct pci_dev *pdev = to_pci_dev(kdev); 1471 struct pm_message dummy; 1472 1473 dummy.event = 0; 1474 1475 return vmw_pci_suspend(pdev, dummy); 1476 } 1477 1478 static int vmw_pm_resume(struct device *kdev) 1479 { 1480 struct pci_dev *pdev = to_pci_dev(kdev); 1481 1482 return vmw_pci_resume(pdev); 1483 } 1484 1485 static int vmw_pm_freeze(struct device *kdev) 1486 { 1487 struct pci_dev *pdev = to_pci_dev(kdev); 1488 struct drm_device *dev = pci_get_drvdata(pdev); 1489 struct vmw_private *dev_priv = vmw_priv(dev); 1490 struct ttm_operation_ctx ctx = { 1491 .interruptible = false, 1492 .no_wait_gpu = false 1493 }; 1494 int ret; 1495 1496 /* 1497 * No user-space processes should be running now. 1498 */ 1499 ret = vmw_kms_suspend(&dev_priv->drm); 1500 if (ret) { 1501 DRM_ERROR("Failed to freeze modesetting.\n"); 1502 return ret; 1503 } 1504 if (dev_priv->enable_fb) 1505 vmw_fb_off(dev_priv); 1506 1507 vmw_execbuf_release_pinned_bo(dev_priv); 1508 vmw_resource_evict_all(dev_priv); 1509 vmw_release_device_early(dev_priv); 1510 while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0); 1511 if (dev_priv->enable_fb) 1512 vmw_fifo_resource_dec(dev_priv); 1513 if (atomic_read(&dev_priv->num_fifo_resources) != 0) { 1514 DRM_ERROR("Can't hibernate while 3D resources are active.\n"); 1515 if (dev_priv->enable_fb) 1516 vmw_fifo_resource_inc(dev_priv); 1517 WARN_ON(vmw_request_device_late(dev_priv)); 1518 dev_priv->suspend_locked = false; 1519 if (dev_priv->suspend_state) 1520 vmw_kms_resume(dev); 1521 if (dev_priv->enable_fb) 1522 vmw_fb_on(dev_priv); 1523 return -EBUSY; 1524 } 1525 1526 vmw_fence_fifo_down(dev_priv->fman); 1527 __vmw_svga_disable(dev_priv); 1528 1529 vmw_release_device_late(dev_priv); 1530 return 0; 1531 } 1532 1533 static int vmw_pm_restore(struct device *kdev) 1534 { 1535 struct pci_dev *pdev = to_pci_dev(kdev); 1536 struct drm_device *dev = pci_get_drvdata(pdev); 1537 struct vmw_private *dev_priv = vmw_priv(dev); 1538 int ret; 1539 1540 vmw_detect_version(dev_priv); 1541 1542 if (dev_priv->enable_fb) 1543 vmw_fifo_resource_inc(dev_priv); 1544 1545 ret = vmw_request_device(dev_priv); 1546 if (ret) 1547 return ret; 1548 1549 if (dev_priv->enable_fb) 1550 __vmw_svga_enable(dev_priv); 1551 1552 vmw_fence_fifo_up(dev_priv->fman); 1553 dev_priv->suspend_locked = false; 1554 if (dev_priv->suspend_state) 1555 vmw_kms_resume(&dev_priv->drm); 1556 1557 if (dev_priv->enable_fb) 1558 vmw_fb_on(dev_priv); 1559 1560 return 0; 1561 } 1562 1563 static const struct dev_pm_ops vmw_pm_ops = { 1564 .freeze = vmw_pm_freeze, 1565 .thaw = vmw_pm_restore, 1566 .restore = vmw_pm_restore, 1567 .suspend = vmw_pm_suspend, 1568 .resume = vmw_pm_resume, 1569 }; 1570 1571 static const struct file_operations vmwgfx_driver_fops = { 1572 .owner = THIS_MODULE, 1573 .open = drm_open, 1574 .release = drm_release, 1575 .unlocked_ioctl = vmw_unlocked_ioctl, 1576 .mmap = vmw_mmap, 1577 .poll = drm_poll, 1578 .read = drm_read, 1579 #if defined(CONFIG_COMPAT) 1580 .compat_ioctl = vmw_compat_ioctl, 1581 #endif 1582 .llseek = noop_llseek, 1583 .get_unmapped_area = vmw_get_unmapped_area, 1584 }; 1585 1586 static const struct drm_driver driver = { 1587 .driver_features = 1588 DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC, 1589 .ioctls = vmw_ioctls, 1590 .num_ioctls = ARRAY_SIZE(vmw_ioctls), 1591 .master_set = vmw_master_set, 1592 .master_drop = vmw_master_drop, 1593 .open = vmw_driver_open, 1594 .postclose = vmw_postclose, 1595 1596 .dumb_create = vmw_dumb_create, 1597 .dumb_map_offset = vmw_dumb_map_offset, 1598 .dumb_destroy = vmw_dumb_destroy, 1599 1600 .prime_fd_to_handle = vmw_prime_fd_to_handle, 1601 .prime_handle_to_fd = vmw_prime_handle_to_fd, 1602 1603 .fops = &vmwgfx_driver_fops, 1604 .name = VMWGFX_DRIVER_NAME, 1605 .desc = VMWGFX_DRIVER_DESC, 1606 .date = VMWGFX_DRIVER_DATE, 1607 .major = VMWGFX_DRIVER_MAJOR, 1608 .minor = VMWGFX_DRIVER_MINOR, 1609 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL 1610 }; 1611 1612 static struct pci_driver vmw_pci_driver = { 1613 .name = VMWGFX_DRIVER_NAME, 1614 .id_table = vmw_pci_id_list, 1615 .probe = vmw_probe, 1616 .remove = vmw_remove, 1617 .driver = { 1618 .pm = &vmw_pm_ops 1619 } 1620 }; 1621 1622 static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1623 { 1624 struct vmw_private *vmw; 1625 int ret; 1626 1627 ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); 1628 if (ret) 1629 goto out_error; 1630 1631 ret = pcim_enable_device(pdev); 1632 if (ret) 1633 goto out_error; 1634 1635 vmw = devm_drm_dev_alloc(&pdev->dev, &driver, 1636 struct vmw_private, drm); 1637 if (IS_ERR(vmw)) { 1638 ret = PTR_ERR(vmw); 1639 goto out_error; 1640 } 1641 1642 pci_set_drvdata(pdev, &vmw->drm); 1643 1644 ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); 1645 if (ret) 1646 goto out_error; 1647 1648 ret = vmw_driver_load(vmw, ent->device); 1649 if (ret) 1650 goto out_release; 1651 1652 ret = drm_dev_register(&vmw->drm, 0); 1653 if (ret) 1654 goto out_unload; 1655 1656 return 0; 1657 out_unload: 1658 vmw_driver_unload(&vmw->drm); 1659 out_release: 1660 ttm_mem_global_release(&ttm_mem_glob); 1661 out_error: 1662 return ret; 1663 } 1664 1665 static int __init vmwgfx_init(void) 1666 { 1667 int ret; 1668 1669 if (drm_firmware_drivers_only()) 1670 return -EINVAL; 1671 1672 ret = pci_register_driver(&vmw_pci_driver); 1673 if (ret) 1674 DRM_ERROR("Failed initializing DRM.\n"); 1675 return ret; 1676 } 1677 1678 static void __exit vmwgfx_exit(void) 1679 { 1680 pci_unregister_driver(&vmw_pci_driver); 1681 } 1682 1683 module_init(vmwgfx_init); 1684 module_exit(vmwgfx_exit); 1685 1686 MODULE_AUTHOR("VMware Inc. and others"); 1687 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); 1688 MODULE_LICENSE("GPL and additional rights"); 1689 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "." 1690 __stringify(VMWGFX_DRIVER_MINOR) "." 1691 __stringify(VMWGFX_DRIVER_PATCHLEVEL) "." 1692 "0"); 1693