/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
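
/*
 * For illustration only (not part of the driver): the first table entry
 * below,
 *
 *	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
 *		      DRM_AUTH | DRM_RENDER_ALLOW)
 *
 * expands to the designated initializer
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_IOCTL_VMW_GET_PARAM,
 *		 DRM_AUTH | DRM_RENDER_ALLOW, vmw_getparam_ioctl},
 *
 * so each ioctl lands at its command-relative index in vmw_ioctls[].
 */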

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These ioctls allow direct access to the framebuffers; mark them master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

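/* 0x15ad is the VMware PCI vendor ID; device 0x0405 is the SVGA II adapter. */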
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
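
/*
 * Example usage (hypothetical values): the parameters above can be given at
 * load time, e.g.
 *
 *	modprobe vmwgfx enable_fbdev=1 force_coherent=1
 *
 * or adjusted at runtime via /sys/module/vmwgfx/parameters/<name>, since
 * module_param_named() with mode 0600 exposes them there.
 */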


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the given
 * vmw_private.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
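
/*
 * Reading aid, summarizing the selection above for the x86 case: an enabled
 * Intel IOMMU selects vmw_dma_map_populate; otherwise, with no force flags
 * set, plain physical page addresses are used. With a force flag set,
 * coherent pages are preferred when the dma_ops implement
 * sync_single_for_cpu and SWIOTLB is active, restrict_iommu downgrades
 * map_populate to map_bind, and force_coherent always wins.
 */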

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * With 32-bit we can only handle 32-bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}

static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

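/*
 * Note on the DRM_VMW_EXECBUF special case below: unlike the other private
 * ioctls, it is dispatched with the user-supplied argument size
 * (_IOC_SIZE(cmd)) rather than being matched against a fixed encoding,
 * presumably so that differently sized revisions of the argument struct can
 * be accepted; only the IOC_IN direction is considered valid for it.
 */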
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}


/**
 * vmw_svga_disable - Disable SVGA mode, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

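/*
 * freeze/thaw/restore below serve the hibernation path, while
 * suspend/resume serve ordinary suspend; .thaw reuses vmw_pm_restore since
 * both callbacks must bring the device back up.
 */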
static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");