/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"


/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
		 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
		 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,		\
		 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
		 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
		 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,	\
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,	\
		 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,	\
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,	\
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,		\
		 struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT			\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT,	\
		struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
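
/*
 * For example, VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, flags)
 * expands to:
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *	{DRM_IOCTL_VMW_GET_PARAM, flags, vmw_getparam_ioctl}
 *
 * so each entry lands in the vmw_ioctls[] slot matching its command
 * number, offset by DRM_COMMAND_BASE.
 */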

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	/*
	 * The permissions of the below ioctl are overridden in
	 * vmw_generic_ioctl(). We require either
	 * DRM_MASTER or capable(CAP_SYS_ADMIN).
	 */
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static const struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
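
/*
 * These parameters can be set at module load time, for example:
 *
 *   modprobe vmwgfx enable_fbdev=1 force_coherent=1
 *
 * With 0600 permissions they are also exposed under
 * /sys/module/vmwgfx/parameters/, although most of them are only
 * consulted while the device is being initialized.
 */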


static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO("  GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO("  Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO("  Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO("  Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO("  Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO("  DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}

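/**
 * vmw_request_device - Perform device setup during driver load and on
 * restore from hibernation.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Initializes the command FIFO, brings up fence processing, creates a
 * command buffer manager when available, performs late setup and creates
 * the dummy query bo. On failure, everything is torn down again in
 * reverse order.
 */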
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * vmw_get_initial_size - Set the initial_[width|height] fields on the
 * given vmw_private.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * Reads the SVGA_REG_[WIDTH|HEIGHT] registers and clamps the values to
 * at least VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the result exceeds the
 * fb_max_[width|height] fields, the values appear to be invalid and are
 * reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}
	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif

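/**
 * vmw_driver_load - Main entry point of the probe path.
 *
 * @dev: Pointer to the struct drm_device.
 * @chipset: The per-device data from the PCI id table, in practice
 * VMWGFX_CHIP_SVGAII.
 *
 * Negotiates the SVGA device version, reads device capabilities and
 * limits, and sets up memory managers, interrupt handling, fencing and
 * kms. Optionally enables the fbdev emulation.
 */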
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(!dev_priv)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);
	spin_lock_init(&dev_priv->cursor_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface memory, but all
		 * HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = vmw_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}


	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
	DRM_INFO("Atomic: %s\n",
		 (dev->driver->driver_features & DRIVER_ATOMIC) ? "yes" : "no");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}

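/**
 * vmw_driver_unload - Tear down the driver.
 *
 * @dev: Pointer to the struct drm_device.
 *
 * Releases the resources set up by vmw_driver_load(), in roughly the
 * reverse order.
 */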
static void vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		vmw_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);
}

static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(!vmw_fp))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

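/*
 * vmw_master_check - Check master status for an incoming ioctl.
 *
 * Returns NULL if the caller may proceed without holding a master
 * reference, a pointer to the vmw_master whose ttm read lock was taken
 * on success, or an ERR_PTR on failure.
 */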
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (drm_is_current_master(file_priv)) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

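/*
 * vmw_generic_ioctl - Common ioctl entry point.
 *
 * Performs driver-specific permission checks for the driver-private
 * ioctls before handing off to the core handler passed in @ioctl_func.
 * VMW_EXECBUF is dispatched directly with its user-supplied size, and
 * VMW_UPDATE_LAYOUT additionally requires the caller to be current
 * master or capable(CAP_SYS_ADMIN).
 */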
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
			if (!drm_is_current_master(file_priv) &&
			    !capable(CAP_SYS_ADMIN))
				return -EACCES;
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(!vmaster))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = pci_register_driver(&vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	pci_unregister_driver(&vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");