/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
                struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
                struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
                struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
                struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
                struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
                struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
                struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
                struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
                struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
                struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
                struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
        [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = \
        {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
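
/*
 * For illustration, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 * DRM_AUTH | DRM_UNLOCKED) expands to the designated initializer
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *       {DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *        vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * i.e. each entry is indexed by its device-private ioctl number, and the
 * fully encoded command is kept in ->cmd_drv for the sanity check in
 * vmw_unlocked_ioctl() below.
 */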

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These allow direct access to the framebuffers; mark them as master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
}
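
/*
 * Note that vmw_print_capabilities() only reports bits it knows about;
 * capability bits without a matching test above are silently skipped.
 * For example, a mask with just SVGA_CAP_3D | SVGA_CAP_GMR set logs
 * "Capabilities:" followed by "  3D." and "  GMR.".
 */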

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
        ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, 0, false, NULL,
                             &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
        vmw_fence_fifo_up(dev_priv->fman);
        ret = vmw_dummy_query_bo_create(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_query_bo;
        vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
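
/*
 * vmw_request_device() and vmw_release_device() are not called directly
 * from elsewhere in this file; they are driven by the num_3d_resources
 * refcount below, on its 0 -> 1 and 1 -> 0 transitions respectively.
 */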

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}
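
/*
 * Worked example for vmw_get_initial_size() above: a host reporting
 * 640x480 is first raised to the 800x600 minimum, while a host reporting
 * a size that exceeds fb_max_width/fb_max_height makes both fields fall
 * back to 800x600 entirely, since such a report indicates a host error.
 */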

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }
        memset(dev_priv, 0, sizeof(*dev_priv));

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);
        idr_init(&dev_priv->context_idr);
        idr_init(&dev_priv->surface_idr);
        idr_init(&dev_priv->stream_idr);
        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);
        INIT_LIST_HEAD(&dev_priv->surface_lru);
        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;


        vmw_master_init(&dev_priv->fbdev_master);
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;


        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
                                           dev_priv->mmio_size, DRM_MTRR_WC);

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
                (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

        ret = pci_request_regions(dev->pdev, "vmwgfx probe");
        dev_priv->stealth = (ret != 0);
        if (dev_priv->stealth) {
                /**
                 * Request at least the mmio PCI resource.
                 */

                DRM_INFO("It appears like vesafb is loaded. "
                         "Ignore above error if any.\n");
                ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
                        goto out_no_device;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        /* Need to start the fifo to check if we can do screen objects */
        ret = vmw_3d_resource_inc(dev_priv, true);
        if (unlikely(ret != 0))
                goto out_no_fifo;
        vmw_kms_save_vga(dev_priv);

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;
        vmw_overlay_init(dev_priv);

        /* 3D Depends on Screen Objects being used. */
        DRM_INFO("Detected %sdevice 3D availability.\n",
                 vmw_fifo_have_3d(dev_priv) ?
                 "" : "no ");

        /* We might be done with the fifo now */
        if (dev_priv->enable_fb) {
                vmw_fb_init(dev_priv);
        } else {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
        register_pm_notifier(&dev_priv->pm_nb);

        return 0;

out_no_irq:
        if (dev_priv->enable_fb)
                vmw_fb_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_kms_close(dev_priv);
out_no_kms:
        /* We still have a 3D resource reference held */
        if (dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
out_no_fifo:
        vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);
out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
out_err4:
        iounmap(dev_priv->mmio_virt);
out_err3:
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
        (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
        vmw_ttm_global_release(dev_priv);
out_err0:
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);
        kfree(dev_priv);
        return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
                     dev_priv->mmio_size, DRM_MTRR_WC);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);
        idr_destroy(&dev_priv->surface_idr);
        idr_destroy(&dev_priv->context_idr);
        idr_destroy(&dev_priv->stream_idr);

        kfree(dev_priv);

        return 0;
}

static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        INIT_LIST_HEAD(&vmw_fp->fence_events);
        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;
        dev_priv->bdev.dev_mapping = dev->dev_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                        &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}
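
/*
 * The cmd_drv comparison above checks the complete encoded command, not
 * just the ioctl number, so a call that uses the right number but the
 * wrong direction or argument size is rejected with -EINVAL before it
 * ever reaches drm_ioctl().
 */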

static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->is_opened = true;

        return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = crtc->funcs->set_config(&set);
                WARN_ON(ret != 0);
        }

}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }
        return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv, false, 0);

        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}
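
/*
 * vmw_master_set() and vmw_master_drop() are roughly symmetric across a
 * VT switch: drop takes the outgoing master's ttm lock and hands the
 * device back to the fbdev master, while the next set with
 * from_open == false releases that lock again through
 * vmw_fp->locked_master.
 */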

static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);
        struct vmw_master *vmaster = dev_priv->active_master;

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&vmaster->lock);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&vmaster->lock);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}
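
/*
 * The prepare/complete pair below brackets the suspend/resume callbacks
 * above: prepare drops the 3d reference held by fbdev so that the
 * num_3d_resources check in vmw_pci_suspend() can pass, and complete
 * reclaims it afterwards. Without this, a driver loaded with
 * enable_fbdev would always fail that check.
 */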

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {

                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Reclaim 3d reference held by fbdev and potentially
         * start fifo.
         */
        if (dev_priv->enable_fb)
                vmw_3d_resource_inc(dev_priv, false);

        dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
        .prepare = vmw_pm_prepare,
        .complete = vmw_pm_complete,
        .suspend = vmw_pm_suspend,
        .resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
        .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = drm_compat_ioctl,
#endif
        .llseek = noop_llseek,
};

static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .firstopen = vmw_firstopen,
        .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
        .get_vblank_counter = vmw_get_vblank_counter,
        .enable_vblank = vmw_enable_vblank,
        .disable_vblank = vmw_disable_vblank,
        .ioctls = vmw_ioctls,
        .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
        .master_create = vmw_master_create,
        .master_destroy = vmw_master_destroy,
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
        .preclose = vmw_preclose,
        .postclose = vmw_postclose,
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
        .date = VMWGFX_DRIVER_DATE,
        .major = VMWGFX_DRIVER_MAJOR,
        .minor = VMWGFX_DRIVER_MINOR,
        .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
        int ret;
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
        return ret;
}

static void __exit vmwgfx_exit(void)
{
        drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
               __stringify(VMWGFX_DRIVER_MINOR) "."
               __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
               "0");